diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index b541376f4de..755ca395b68 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -17,7 +17,8 @@ go.sum @ajm188 @deepthi @harshit-gangal @mattlord @rohit-nayak-ps @systay @froui
/go/cache @vmg
/go/cmd @ajm188 @deepthi
/go/cmd/vtadmin @ajm188 @notfelineit
-/go/cmd/vtctldclient @ajm188 @notfelineit
+/go/cmd/vtctldclient @ajm188 @mattlord
+/go/cmd/vtctldclient/command/vreplication @mattlord @rohit-nayak-ps
/go/internal/flag @ajm188 @rohit-nayak-ps
/go/mysql @harshit-gangal @systay @mattlord
/go/pools @deepthi @harshit-gangal
diff --git a/.github/workflows/assign_milestone.yml b/.github/workflows/assign_milestone.yml
index 3aae05bacc0..7c56f45728f 100644
--- a/.github/workflows/assign_milestone.yml
+++ b/.github/workflows/assign_milestone.yml
@@ -20,7 +20,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Checkout code
uses: actions/checkout@v3
diff --git a/.github/workflows/auto_approve_pr.yml b/.github/workflows/auto_approve_pr.yml
new file mode 100644
index 00000000000..552f1ec2e68
--- /dev/null
+++ b/.github/workflows/auto_approve_pr.yml
@@ -0,0 +1,29 @@
+name: Auto Approval of Bot Pull Requests
+on:
+ pull_request:
+ types: [opened, reopened]
+
+# The default GITHUB_TOKEN may be restricted to read-only; "gh pr review --approve"
+# needs pull-requests write access and actions/checkout needs contents read access.
+permissions:
+ contents: read
+ pull-requests: write
+
+jobs:
+ auto_approve:
+ name: Auto Approve Pull Request
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v3
+
+ - name: Auto Approve Pull Request
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ run: |
+ # here we are checking that the PR has been created by the vitess-bot[bot] account and that it is not a draft
+ # if there is a merge conflict in the backport, the PR will always be created as a draft, meaning we can rely
+ # on checking whether or not the PR is a draft
+ if [[ "${{github.event.pull_request.user.login}}" == "vitess-bot[bot]" ]] && [[ "${{github.event.pull_request.draft}}" == "false" ]]; then
+ gh pr review ${{ github.event.pull_request.number }} --approve
+ fi
diff --git a/.github/workflows/check_make_vtadmin_authz_testgen.yml b/.github/workflows/check_make_vtadmin_authz_testgen.yml
index 98f5f4fd767..8f9199e7658 100644
--- a/.github/workflows/check_make_vtadmin_authz_testgen.yml
+++ b/.github/workflows/check_make_vtadmin_authz_testgen.yml
@@ -50,7 +50,7 @@ jobs:
uses: actions/setup-go@v4
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.vtadmin_changes == 'true'
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.vtadmin_changes == 'true'
diff --git a/.github/workflows/check_make_vtadmin_web_proto.yml b/.github/workflows/check_make_vtadmin_web_proto.yml
index 5f3fd821893..5f3302fc97c 100644
--- a/.github/workflows/check_make_vtadmin_web_proto.yml
+++ b/.github/workflows/check_make_vtadmin_web_proto.yml
@@ -52,14 +52,14 @@ jobs:
uses: actions/setup-go@v4
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true'
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Setup Node
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true'
uses: actions/setup-node@v3
with:
# node-version should match package.json
- node-version: '16.19.0'
+ node-version: '18.16.0'
- name: Install npm dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.proto_changes == 'true'
diff --git a/.github/workflows/cluster_endtoend_12.yml b/.github/workflows/cluster_endtoend_12.yml
index f27268263d5..5ce650f1ea6 100644
--- a/.github/workflows/cluster_endtoend_12.yml
+++ b/.github/workflows/cluster_endtoend_12.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (12)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard 12 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_13.yml b/.github/workflows/cluster_endtoend_13.yml
index f08387a9921..fa98916736f 100644
--- a/.github/workflows/cluster_endtoend_13.yml
+++ b/.github/workflows/cluster_endtoend_13.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (13)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard 13 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_15.yml b/.github/workflows/cluster_endtoend_15.yml
index ad3082a4f69..2501f26ab58 100644
--- a/.github/workflows/cluster_endtoend_15.yml
+++ b/.github/workflows/cluster_endtoend_15.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (15)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard 15 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_18.yml b/.github/workflows/cluster_endtoend_18.yml
index d4826b6f2ed..234e672afb0 100644
--- a/.github/workflows/cluster_endtoend_18.yml
+++ b/.github/workflows/cluster_endtoend_18.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (18)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -109,7 +116,7 @@ jobs:
make tools
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -118,7 +125,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -129,16 +136,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard 18 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_21.yml b/.github/workflows/cluster_endtoend_21.yml
index 13c861a0ee7..feeedcd46b8 100644
--- a/.github/workflows/cluster_endtoend_21.yml
+++ b/.github/workflows/cluster_endtoend_21.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (21)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard 21 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_22.yml b/.github/workflows/cluster_endtoend_22.yml
index 59367df8278..f4cee992fb2 100644
--- a/.github/workflows/cluster_endtoend_22.yml
+++ b/.github/workflows/cluster_endtoend_22.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (22)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard 22 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_backup_pitr.yml b/.github/workflows/cluster_endtoend_backup_pitr.yml
index 7e70dabe84f..9b97e08f7b2 100644
--- a/.github/workflows/cluster_endtoend_backup_pitr.yml
+++ b/.github/workflows/cluster_endtoend_backup_pitr.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (backup_pitr)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -107,7 +114,7 @@ jobs:
sudo apt-get install percona-xtrabackup-80 lz4
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -116,7 +123,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -127,16 +134,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard backup_pitr | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml b/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml
index 8d93e57728d..210dfc9ba95 100644
--- a/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_backup_pitr_mysql57.yml
@@ -20,7 +20,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (backup_pitr) mysql57
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -40,6 +40,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -68,7 +75,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -131,7 +138,7 @@ jobs:
fi
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -140,7 +147,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -151,16 +158,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard backup_pitr | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml
new file mode 100644
index 00000000000..6cad7922321
--- /dev/null
+++ b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml
@@ -0,0 +1,151 @@
+# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
+
+name: Cluster (backup_pitr_xtrabackup)
+on: [push, pull_request]
+concurrency:
+ group: ${{ format('{0}-{1}', github.ref, 'Cluster (backup_pitr_xtrabackup)') }}
+ cancel-in-progress: true
+
+permissions: read-all
+
+env:
+ LAUNCHABLE_ORGANIZATION: "vitess"
+ LAUNCHABLE_WORKSPACE: "vitess-app"
+ GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
+
+jobs:
+ build:
+ name: Run endtoend tests on Cluster (backup_pitr_xtrabackup)
+ runs-on: gh-hosted-runners-4cores-1
+
+ steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
+ - name: Check if workflow needs to be skipped
+ id: skip-workflow
+ run: |
+ skip='false'
+ if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
+ skip='true'
+ fi
+ echo Skip ${skip}
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
+ - name: Check out code
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: actions/checkout@v3
+
+ - name: Check for changes in relevant files
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: frouioui/paths-filter@main
+ id: changes
+ with:
+ token: ''
+ filters: |
+ end_to_end:
+ - 'go/**/*.go'
+ - 'test.go'
+ - 'Makefile'
+ - 'build.env'
+ - 'go.sum'
+ - 'go.mod'
+ - 'proto/*.proto'
+ - 'tools/**'
+ - 'config/**'
+ - 'bootstrap.sh'
+ - '.github/workflows/cluster_endtoend_backup_pitr_xtrabackup.yml'
+
+ - name: Set up Go
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-go@v4
+ with:
+ go-version: 1.21.3
+
+ - name: Set up python
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-python@v4
+
+ - name: Tune the OS
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
+ # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
+ echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
+ sudo sysctl -p /etc/sysctl.conf
+
+ - name: Get dependencies
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+
+ # Setup Percona Server for MySQL 8.0
+ sudo apt-get update
+ sudo apt-get install -y lsb-release gnupg2 curl
+ wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
+ sudo percona-release setup ps80
+ sudo apt-get update
+
+ # Install everything else we need, and configure
+ sudo apt-get install -y percona-server-server percona-server-client make unzip g++ etcd git wget eatmydata xz-utils libncurses5
+
+ sudo service mysql stop
+ sudo service etcd stop
+ sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
+ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
+ go mod download
+
+ # install JUnit report formatter
+ go install github.com/vitessio/go-junit-report@HEAD
+
+ sudo apt-get install -y percona-xtrabackup-80 lz4
+
+ - name: Setup launchable dependencies
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ run: |
+ # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
+ pip3 install --user launchable~=1.0 > /dev/null
+
+ # verify that launchable setup is all correct.
+ launchable verify || true
+
+ # Tell Launchable about the build you are producing and testing
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
+
+ - name: Run cluster endtoend test
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ timeout-minutes: 45
+ run: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -exo pipefail
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard backup_pitr_xtrabackup | tee -a output.txt | go-junit-report -set-exit-code > report.xml
+
+ - name: Print test output and Record test result in launchable if PR is not a draft
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
+ run: |
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
+
+ # print test output
+ cat output.txt
diff --git a/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup_mysql57.yml b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup_mysql57.yml
new file mode 100644
index 00000000000..b895a19a8d0
--- /dev/null
+++ b/.github/workflows/cluster_endtoend_backup_pitr_xtrabackup_mysql57.yml
@@ -0,0 +1,175 @@
+# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
+
+name: Cluster (backup_pitr_xtrabackup) mysql57
+on: [push, pull_request]
+concurrency:
+ group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (backup_pitr_xtrabackup) mysql57')
+ cancel-in-progress: true
+
+permissions: read-all
+
+env:
+ LAUNCHABLE_ORGANIZATION: "vitess"
+ LAUNCHABLE_WORKSPACE: "vitess-app"
+ GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
+
+ # This is used if we need to pin the xtrabackup version used in tests.
+ # If this is NOT set then the latest version available will be used.
+ #XTRABACKUP_VERSION: "2.4.24-1"
+
+jobs:
+ build:
+ name: Run endtoend tests on Cluster (backup_pitr_xtrabackup) mysql57
+ runs-on: gh-hosted-runners-4cores-1
+
+ steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
+ - name: Check if workflow needs to be skipped
+ id: skip-workflow
+ run: |
+ skip='false'
+ if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
+ skip='true'
+ fi
+ echo Skip ${skip}
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
+ - name: Check out code
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: actions/checkout@v3
+
+ - name: Check for changes in relevant files
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: frouioui/paths-filter@main
+ id: changes
+ with:
+ token: ''
+ filters: |
+ end_to_end:
+ - 'go/**/*.go'
+ - 'test.go'
+ - 'Makefile'
+ - 'build.env'
+ - 'go.sum'
+ - 'go.mod'
+ - 'proto/*.proto'
+ - 'tools/**'
+ - 'config/**'
+ - 'bootstrap.sh'
+ - '.github/workflows/cluster_endtoend_backup_pitr_xtrabackup_mysql57.yml'
+
+ - name: Set up Go
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-go@v4
+ with:
+ go-version: 1.21.3
+
+ - name: Set up python
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-python@v4
+
+ - name: Tune the OS
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
+ # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
+ echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
+ sudo sysctl -p /etc/sysctl.conf
+
+ - name: Get dependencies
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+ sudo apt-get update
+
+ # Uninstall any previously installed MySQL first
+ sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
+ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
+
+ sudo systemctl stop apparmor
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
+ sudo apt-get -y autoremove
+ sudo apt-get -y autoclean
+ sudo deluser mysql
+ sudo rm -rf /var/lib/mysql
+ sudo rm -rf /etc/mysql
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ # Bionic packages are still compatible for Jammy since there are no MySQL 5.7
+ # packages for Jammy.
+ echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
+ sudo apt-get update
+ sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5
+
+ sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata
+ sudo service mysql stop
+ sudo service etcd stop
+
+ # install JUnit report formatter
+ go install github.com/vitessio/go-junit-report@HEAD
+
+ wget "https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb"
+ sudo apt-get install -y gnupg2
+ sudo dpkg -i "percona-release_latest.$(lsb_release -sc)_all.deb"
+ sudo apt-get update
+ if [[ -n $XTRABACKUP_VERSION ]]; then
+ debfile="percona-xtrabackup-24_$XTRABACKUP_VERSION.$(lsb_release -sc)_amd64.deb"
+ wget "https://repo.percona.com/pxb-24/apt/pool/main/p/percona-xtrabackup-24/$debfile"
+ sudo apt install -y "./$debfile"
+ else
+ sudo apt-get install -y percona-xtrabackup-24
+ fi
+
+ - name: Setup launchable dependencies
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ run: |
+ # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
+ pip3 install --user launchable~=1.0 > /dev/null
+
+ # verify that launchable setup is all correct.
+ launchable verify || true
+
+ # Tell Launchable about the build you are producing and testing
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
+
+ - name: Run cluster endtoend test
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ timeout-minutes: 45
+ run: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -exo pipefail
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard backup_pitr_xtrabackup | tee -a output.txt | go-junit-report -set-exit-code > report.xml
+
+ - name: Print test output and Record test result in launchable if PR is not a draft
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
+ run: |
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
+
+ # print test output
+ cat output.txt
diff --git a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml
index b46206057b6..f65d2625c28 100644
--- a/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml
+++ b/.github/workflows/cluster_endtoend_ers_prs_newfeatures_heavy.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (ers_prs_newfeatures_heavy)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,7 +131,7 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
@@ -147,11 +154,13 @@ jobs:
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard ers_prs_newfeatures_heavy | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_mysql80.yml b/.github/workflows/cluster_endtoend_mysql80.yml
index a1d1151aa5a..5c3739aafd0 100644
--- a/.github/workflows/cluster_endtoend_mysql80.yml
+++ b/.github/workflows/cluster_endtoend_mysql80.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (mysql80)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard mysql80 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_mysql_server_vault.yml b/.github/workflows/cluster_endtoend_mysql_server_vault.yml
index d808fe29dc2..793e7372309 100644
--- a/.github/workflows/cluster_endtoend_mysql_server_vault.yml
+++ b/.github/workflows/cluster_endtoend_mysql_server_vault.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (mysql_server_vault)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -109,7 +116,7 @@ jobs:
make tools
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -118,7 +125,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -129,16 +136,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard mysql_server_vault | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml
index ea414171ac1..af61a6a5059 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_ghost)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -65,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -105,7 +112,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -114,7 +121,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -125,16 +132,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_ghost | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml
index 4c348478573..43dc184c204 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_ghost_mysql57.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_ghost) mysql57
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -65,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -116,7 +123,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -125,7 +132,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -136,16 +143,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_ghost | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert.yml b/.github/workflows/cluster_endtoend_onlineddl_revert.yml
index 7b43c85def4..d2c6e23ee86 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_revert.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_revert.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_revert)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -65,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -105,7 +112,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -114,7 +121,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -125,16 +132,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_revert | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml
index 3a3ec4c4168..ac93c1ac532 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_revert_mysql57.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_revert) mysql57
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -65,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -116,7 +123,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -125,7 +132,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -136,16 +143,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_revert | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml
index b698c74544b..38031f4441e 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_scheduler)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -65,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -105,7 +112,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -114,7 +121,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -125,16 +132,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_scheduler | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml
index d3401f81ce3..0a205266c4f 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_scheduler) mysql57
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -65,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -116,7 +123,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -125,7 +132,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -136,16 +143,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_scheduler | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
index 363ed20249f..d83fb7010b8 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -65,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -105,7 +112,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -114,7 +121,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -125,7 +132,7 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
cat <<-EOF>>./config/mycnf/mysql80.cnf
binlog-transaction-compression=ON
@@ -134,11 +141,13 @@ jobs:
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml
index 75f4493ae28..a941c9faef0 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl) mysql57
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -65,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -116,7 +123,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -125,7 +132,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -136,16 +143,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
index 9769be1cfd2..a51cb6c33fe 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl_stress)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -65,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -105,7 +112,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -114,7 +121,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -125,7 +132,7 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
cat <<-EOF>>./config/mycnf/mysql80.cnf
binlog-transaction-compression=ON
@@ -134,11 +141,13 @@ jobs:
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml
index 63f5a8e4b07..77626919a89 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_mysql57.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl_stress) mysql57
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -65,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -116,7 +123,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -125,7 +132,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -136,16 +143,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
index d1eab9ca6ed..1230fcd3518 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl_stress_suite)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -65,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -105,7 +112,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -114,7 +121,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -125,7 +132,7 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
cat <<-EOF>>./config/mycnf/mysql80.cnf
binlog-transaction-compression=ON
@@ -134,11 +141,13 @@ jobs:
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml
index 19ba98829a0..86ef8eec019 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress_suite_mysql57.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl_stress_suite) mysql57
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -65,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -116,7 +123,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -125,7 +132,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -136,16 +143,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_stress_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
index dd9d6cf45aa..34e521d648f 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl_suite)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -65,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -105,7 +112,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -114,7 +121,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -125,7 +132,7 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
cat <<-EOF>>./config/mycnf/mysql80.cnf
binlog-transaction-compression=ON
@@ -134,11 +141,13 @@ jobs:
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml
index 45156849e79..a400ea99677 100644
--- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite_mysql57.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (onlineddl_vrepl_suite) mysql57
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -65,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -116,7 +123,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -125,7 +132,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -136,16 +143,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_suite | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
index 7b169b16e01..68a25ee46ec 100644
--- a/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
+++ b/.github/workflows/cluster_endtoend_schemadiff_vrepl.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (schemadiff_vrepl)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -65,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -105,7 +112,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -114,7 +121,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -125,7 +132,7 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
cat <<-EOF>>./config/mycnf/mysql80.cnf
binlog-transaction-compression=ON
@@ -134,11 +141,13 @@ jobs:
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard schemadiff_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml b/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml
index 8c2d2dc9dc1..ba57948d162 100644
--- a/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_schemadiff_vrepl_mysql57.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (schemadiff_vrepl) mysql57
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -65,7 +72,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -116,7 +123,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -125,7 +132,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -136,16 +143,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard schemadiff_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml
index c21ae5be321..0fe0d4e18da 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_consul.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_consul.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (tabletmanager_consul)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -109,7 +116,7 @@ jobs:
make tools
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -118,7 +125,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -129,16 +136,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_consul | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml
index 0e81f901b79..5af0e2ff852 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (tabletmanager_tablegc)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_tablegc | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml
index b1bba866718..e1ae8eeb69c 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_tablegc_mysql57.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (tabletmanager_tablegc) mysql57
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -115,7 +122,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -124,7 +131,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -135,16 +142,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_tablegc | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml
index e8928bd2e32..8b6826f257c 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml
+++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler_topo.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (tabletmanager_throttler_topo)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_throttler_topo | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_topo_connection_cache.yml b/.github/workflows/cluster_endtoend_topo_connection_cache.yml
index 68e0aef2216..bb59336df48 100644
--- a/.github/workflows/cluster_endtoend_topo_connection_cache.yml
+++ b/.github/workflows/cluster_endtoend_topo_connection_cache.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (topo_connection_cache)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard topo_connection_cache | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
index f424f6757b1..ec3d101629e 100644
--- a/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_across_db_versions.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_across_db_versions)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,7 +131,7 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
@@ -151,11 +158,13 @@ jobs:
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vreplication_across_db_versions | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vreplication_basic.yml b/.github/workflows/cluster_endtoend_vreplication_basic.yml
index 16f03764d0d..ea6219bf869 100644
--- a/.github/workflows/cluster_endtoend_vreplication_basic.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_basic.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_basic)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,7 +131,7 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
@@ -151,11 +158,13 @@ jobs:
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vreplication_basic | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
index ea842718f62..5ef46750668 100644
--- a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_cellalias)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,7 +131,7 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
@@ -151,11 +158,13 @@ jobs:
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vreplication_cellalias | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml
index 92f85f7e7ae..d8961314a46 100644
--- a/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_migrate_vdiff2_convert_tz.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_migrate_vdiff2_convert_tz)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,7 +131,7 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
@@ -151,11 +158,13 @@ jobs:
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vreplication_migrate_vdiff2_convert_tz | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vreplication_multicell.yml b/.github/workflows/cluster_endtoend_vreplication_multicell.yml
index d4f68493680..328c062e1d0 100644
--- a/.github/workflows/cluster_endtoend_vreplication_multicell.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_multicell.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_multicell)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,7 +131,7 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
@@ -151,11 +158,13 @@ jobs:
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vreplication_multicell | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml
similarity index 70%
rename from .github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml
rename to .github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml
index 60d117c49d9..28dca240332 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml
@@ -1,9 +1,9 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-name: Cluster (tabletmanager_throttler_custom_config)
+name: Cluster (vreplication_partial_movetables_basic)
on: [push, pull_request]
concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (tabletmanager_throttler_custom_config)')
+ group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_partial_movetables_basic)')
cancel-in-progress: true
permissions: read-all
@@ -15,8 +15,8 @@ env:
jobs:
build:
- name: Run endtoend tests on Cluster (tabletmanager_throttler_custom_config)
- runs-on: ubuntu-22.04
+ name: Run endtoend tests on Cluster (vreplication_partial_movetables_basic)
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -58,13 +65,13 @@ jobs:
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml'
+ - '.github/workflows/cluster_endtoend_vreplication_partial_movetables_basic.yml'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,40 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
-
+ set -exo pipefail
+
+ # Increase our open file descriptor limit as we could hit this
+ ulimit -n 65536
+ cat <<-EOF>>./config/mycnf/mysql80.cnf
+ innodb_buffer_pool_dump_at_shutdown=OFF
+ innodb_buffer_pool_in_core_file=OFF
+ innodb_buffer_pool_load_at_startup=OFF
+ innodb_buffer_pool_size=64M
+ innodb_doublewrite=OFF
+ innodb_flush_log_at_trx_commit=0
+ innodb_flush_method=O_DIRECT
+ innodb_numa_interleave=ON
+ innodb_adaptive_hash_index=OFF
+ sync_binlog=0
+ sync_relay_log=0
+ performance_schema=OFF
+ slow-query-log=OFF
+ EOF
+
+ cat <<-EOF>>./config/mycnf/mysql80.cnf
+ binlog-transaction-compression=ON
+ EOF
+
# run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_throttler_custom_config | tee -a output.txt | go-junit-report -set-exit-code > report.xml
+ eatmydata -- go run test.go -docker=false -follow -shard vreplication_partial_movetables_basic | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml
new file mode 100644
index 00000000000..c002a72d1e7
--- /dev/null
+++ b/.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml
@@ -0,0 +1,170 @@
+# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
+
+name: Cluster (vreplication_partial_movetables_sequences)
+on: [push, pull_request]
+concurrency:
+ group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vreplication_partial_movetables_sequences)')
+ cancel-in-progress: true
+
+permissions: read-all
+
+env:
+ LAUNCHABLE_ORGANIZATION: "vitess"
+ LAUNCHABLE_WORKSPACE: "vitess-app"
+ GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}"
+
+jobs:
+ build:
+ name: Run endtoend tests on Cluster (vreplication_partial_movetables_sequences)
+ runs-on: gh-hosted-runners-4cores-1
+
+ steps:
+ - name: Skip CI
+ run: |
+ if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
+ echo "skipping CI due to the 'Skip CI' label"
+ exit 1
+ fi
+
+ - name: Check if workflow needs to be skipped
+ id: skip-workflow
+ run: |
+ skip='false'
+ if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
+ skip='true'
+ fi
+ echo Skip ${skip}
+ echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
+ - name: Check out code
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: actions/checkout@v3
+
+ - name: Check for changes in relevant files
+ if: steps.skip-workflow.outputs.skip-workflow == 'false'
+ uses: frouioui/paths-filter@main
+ id: changes
+ with:
+ token: ''
+ filters: |
+ end_to_end:
+ - 'go/**/*.go'
+ - 'test.go'
+ - 'Makefile'
+ - 'build.env'
+ - 'go.sum'
+ - 'go.mod'
+ - 'proto/*.proto'
+ - 'tools/**'
+ - 'config/**'
+ - 'bootstrap.sh'
+ - '.github/workflows/cluster_endtoend_vreplication_partial_movetables_sequences.yml'
+
+ - name: Set up Go
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-go@v4
+ with:
+ go-version: 1.21.3
+
+ - name: Set up python
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ uses: actions/setup-python@v4
+
+ - name: Tune the OS
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+ # Limit local port range to not use ports that overlap with server side
+ # ports that we listen on.
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
+ # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio
+ echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf
+ sudo sysctl -p /etc/sysctl.conf
+
+ - name: Get dependencies
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ run: |
+
+ # Get key to latest MySQL repo
+ sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29
+ # Setup MySQL 8.0
+ wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb
+ echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
+ sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
+ sudo apt-get update
+ # Install everything else we need, and configure
+ sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5
+
+ sudo service mysql stop
+ sudo service etcd stop
+ sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
+ sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld
+ go mod download
+
+ # install JUnit report formatter
+ go install github.com/vitessio/go-junit-report@HEAD
+
+ - name: Setup launchable dependencies
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ run: |
+ # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
+ pip3 install --user launchable~=1.0 > /dev/null
+
+ # verify that launchable setup is all correct.
+ launchable verify || true
+
+ # Tell Launchable about the build you are producing and testing
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
+
+ - name: Run cluster endtoend test
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
+ timeout-minutes: 45
+ run: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which musn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ source build.env
+
+ set -exo pipefail
+
+ # Increase our open file descriptor limit as we could hit this
+ ulimit -n 65536
+ cat <<-EOF>>./config/mycnf/mysql80.cnf
+ innodb_buffer_pool_dump_at_shutdown=OFF
+ innodb_buffer_pool_in_core_file=OFF
+ innodb_buffer_pool_load_at_startup=OFF
+ innodb_buffer_pool_size=64M
+ innodb_doublewrite=OFF
+ innodb_flush_log_at_trx_commit=0
+ innodb_flush_method=O_DIRECT
+ innodb_numa_interleave=ON
+ innodb_adaptive_hash_index=OFF
+ sync_binlog=0
+ sync_relay_log=0
+ performance_schema=OFF
+ slow-query-log=OFF
+ EOF
+
+ cat <<-EOF>>./config/mycnf/mysql80.cnf
+ binlog-transaction-compression=ON
+ EOF
+
+ # run the tests however you normally do, then produce a JUnit XML file
+ eatmydata -- go run test.go -docker=false -follow -shard vreplication_partial_movetables_sequences | tee -a output.txt | go-junit-report -set-exit-code > report.xml
+
+ - name: Print test output and Record test result in launchable if PR is not a draft
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
+ run: |
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
+
+ # print test output
+ cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vreplication_v2.yml b/.github/workflows/cluster_endtoend_vreplication_v2.yml
index 8faa40f1e7d..9229b34a5bf 100644
--- a/.github/workflows/cluster_endtoend_vreplication_v2.yml
+++ b/.github/workflows/cluster_endtoend_vreplication_v2.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vreplication_v2)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,7 +131,7 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
@@ -151,11 +158,13 @@ jobs:
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vreplication_v2 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vstream_failover.yml b/.github/workflows/cluster_endtoend_vstream_failover.yml
index f8d6871a539..a620b8caad9 100644
--- a/.github/workflows/cluster_endtoend_vstream_failover.yml
+++ b/.github/workflows/cluster_endtoend_vstream_failover.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vstream_failover)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vstream_failover | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml b/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml
index de34b4824da..5db27dad710 100644
--- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml
+++ b/.github/workflows/cluster_endtoend_vstream_stoponreshard_false.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vstream_stoponreshard_false)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vstream_stoponreshard_false | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml b/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml
index 1e4fca5fcba..32e7685bf8f 100644
--- a/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml
+++ b/.github/workflows/cluster_endtoend_vstream_stoponreshard_true.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vstream_stoponreshard_true)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vstream_stoponreshard_true | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml b/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml
index eebc7fef114..27620919d99 100644
--- a/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml
+++ b/.github/workflows/cluster_endtoend_vstream_with_keyspaces_to_watch.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vstream_with_keyspaces_to_watch)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vstream_with_keyspaces_to_watch | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtbackup.yml b/.github/workflows/cluster_endtoend_vtbackup.yml
index 2f41e175096..8f2dcd3768b 100644
--- a/.github/workflows/cluster_endtoend_vtbackup.yml
+++ b/.github/workflows/cluster_endtoend_vtbackup.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtbackup)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtbackup | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml
index 75580e5b858..aad84a910c6 100644
--- a/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml
+++ b/.github/workflows/cluster_endtoend_vtctlbackup_sharded_clustertest_heavy.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtctlbackup_sharded_clustertest_heavy)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,7 +131,7 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
@@ -147,11 +154,13 @@ jobs:
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtctlbackup_sharded_clustertest_heavy | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml
index bb673daa144..19bb9efe86c 100644
--- a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_concurrentdml)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_concurrentdml | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml b/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml
similarity index 78%
rename from .github/workflows/cluster_endtoend_tabletmanager_throttler.yml
rename to .github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml
index 813d7932f62..e2824c5844d 100644
--- a/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml
@@ -1,9 +1,9 @@
# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows"
-name: Cluster (tabletmanager_throttler)
+name: Cluster (vtgate_foreignkey_stress)
on: [push, pull_request]
concurrency:
- group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (tabletmanager_throttler)')
+ group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (vtgate_foreignkey_stress)')
cancel-in-progress: true
permissions: read-all
@@ -15,8 +15,8 @@ env:
jobs:
build:
- name: Run endtoend tests on Cluster (tabletmanager_throttler)
- runs-on: ubuntu-22.04
+ name: Run endtoend tests on Cluster (vtgate_foreignkey_stress)
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -58,13 +65,13 @@ jobs:
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- - '.github/workflows/cluster_endtoend_tabletmanager_throttler.yml'
+ - '.github/workflows/cluster_endtoend_vtgate_foreignkey_stress.yml'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
- eatmydata -- go run test.go -docker=false -follow -shard tabletmanager_throttler | tee -a output.txt | go-junit-report -set-exit-code > report.xml
+ eatmydata -- go run test.go -docker=false -follow -shard vtgate_foreignkey_stress | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_gen4.yml b/.github/workflows/cluster_endtoend_vtgate_gen4.yml
index 2d9d59566e0..205de4b5e68 100644
--- a/.github/workflows/cluster_endtoend_vtgate_gen4.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_gen4.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_gen4)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_gen4 | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml
index 30a1405afa4..98d59d60aee 100644
--- a/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_general_heavy.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_general_heavy)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,7 +131,7 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
@@ -147,11 +154,13 @@ jobs:
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_general_heavy | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_godriver.yml b/.github/workflows/cluster_endtoend_vtgate_godriver.yml
index 1162fc3e282..2f4082d10d4 100644
--- a/.github/workflows/cluster_endtoend_vtgate_godriver.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_godriver.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_godriver)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_godriver | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml
index bd5048a9aa8..4a9f6e227fb 100644
--- a/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_partial_keyspace.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_partial_keyspace)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_partial_keyspace -partial-keyspace=true | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_queries.yml b/.github/workflows/cluster_endtoend_vtgate_queries.yml
index 41c0871c6b9..6d41d922fc4 100644
--- a/.github/workflows/cluster_endtoend_vtgate_queries.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_queries.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_queries)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_queries | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml
index 678dc12099b..028e1492029 100644
--- a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_readafterwrite)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_readafterwrite | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml
index 7c1577d4dd7..5972472402e 100644
--- a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_reservedconn)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_reservedconn | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_schema.yml b/.github/workflows/cluster_endtoend_vtgate_schema.yml
index c85d8386ec5..68a2bd697be 100644
--- a/.github/workflows/cluster_endtoend_vtgate_schema.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_schema.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_schema)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_schema | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml
index 3e6fe5f6059..1c5d1e675f8 100644
--- a/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_schema_tracker.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_schema_tracker)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_schema_tracker | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml
index 04ac695c5e4..26adb43fd74 100644
--- a/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_tablet_healthcheck_cache.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_tablet_healthcheck_cache)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_tablet_healthcheck_cache | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_topo.yml b/.github/workflows/cluster_endtoend_vtgate_topo.yml
index 06a199e2585..49945a607d8 100644
--- a/.github/workflows/cluster_endtoend_vtgate_topo.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_topo.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_topo)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_topo | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml
index d2064784715..ee72650dcbd 100644
--- a/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_topo_consul.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_topo_consul)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -109,7 +116,7 @@ jobs:
make tools
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -118,7 +125,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -129,16 +136,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_topo_consul | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml
index 9550cc1e0c3..4051373d9aa 100644
--- a/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_topo_etcd.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_topo_etcd)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_topo_etcd | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_transaction.yml b/.github/workflows/cluster_endtoend_vtgate_transaction.yml
index 19de405dadc..b7cc848692f 100644
--- a/.github/workflows/cluster_endtoend_vtgate_transaction.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_transaction.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_transaction)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_transaction | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml
index a50581b44dd..b6359682993 100644
--- a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_unsharded)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_unsharded | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml
index f5a8bde45f5..83fb2b2d829 100644
--- a/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_vindex_heavy.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_vindex_heavy)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,7 +131,7 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# Increase our open file descriptor limit as we could hit this
ulimit -n 65536
@@ -147,11 +154,13 @@ jobs:
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_vindex_heavy | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtgate_vschema.yml b/.github/workflows/cluster_endtoend_vtgate_vschema.yml
index 85aa4c14eb8..4c2f3b2637d 100644
--- a/.github/workflows/cluster_endtoend_vtgate_vschema.yml
+++ b/.github/workflows/cluster_endtoend_vtgate_vschema.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtgate_vschema)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtgate_vschema | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtorc.yml b/.github/workflows/cluster_endtoend_vtorc.yml
index 41e33da32aa..872576ab8b5 100644
--- a/.github/workflows/cluster_endtoend_vtorc.yml
+++ b/.github/workflows/cluster_endtoend_vtorc.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtorc)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtorc | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vtorc_mysql57.yml b/.github/workflows/cluster_endtoend_vtorc_mysql57.yml
index 0e46ecd4972..72baf7940b6 100644
--- a/.github/workflows/cluster_endtoend_vtorc_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_vtorc_mysql57.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vtorc) mysql57
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -115,7 +122,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -124,7 +131,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -135,16 +142,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vtorc | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml
index 4ce2061a0df..b56d4dc61a5 100644
--- a/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml
+++ b/.github/workflows/cluster_endtoend_vttablet_prscomplex.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (vttablet_prscomplex)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,7 +111,7 @@ jobs:
go install github.com/vitessio/go-junit-report@HEAD
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -113,7 +120,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -124,16 +131,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard vttablet_prscomplex | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_xb_backup.yml b/.github/workflows/cluster_endtoend_xb_backup.yml
index 4130f7b4ee0..f24baaf31af 100644
--- a/.github/workflows/cluster_endtoend_xb_backup.yml
+++ b/.github/workflows/cluster_endtoend_xb_backup.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (xb_backup)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,10 +111,10 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- sudo apt-get install percona-xtrabackup-80 lz4
+ sudo apt-get install -y percona-xtrabackup-80 lz4
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -116,7 +123,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -127,16 +134,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard xb_backup | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml b/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml
index 49707ea16dc..b85628a0dbe 100644
--- a/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_xb_backup_mysql57.yml
@@ -20,7 +20,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (xb_backup) mysql57
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -40,6 +40,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -68,7 +75,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -131,7 +138,7 @@ jobs:
fi
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -140,7 +147,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -151,16 +158,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard xb_backup | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_xb_recovery.yml b/.github/workflows/cluster_endtoend_xb_recovery.yml
index 70e98a618a4..3fbe34b0569 100644
--- a/.github/workflows/cluster_endtoend_xb_recovery.yml
+++ b/.github/workflows/cluster_endtoend_xb_recovery.yml
@@ -16,7 +16,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (xb_recovery)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -104,10 +111,10 @@ jobs:
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
- sudo apt-get install percona-xtrabackup-80 lz4
+ sudo apt-get install -y percona-xtrabackup-80 lz4
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -116,7 +123,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -127,16 +134,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard xb_recovery | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml b/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml
index 1982c1ad1ea..aaa2b034105 100644
--- a/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml
+++ b/.github/workflows/cluster_endtoend_xb_recovery_mysql57.yml
@@ -20,7 +20,7 @@ env:
jobs:
build:
name: Run endtoend tests on Cluster (xb_recovery) mysql57
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -40,6 +40,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -68,7 +75,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -131,7 +138,7 @@ jobs:
fi
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -140,7 +147,7 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run cluster endtoend test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -151,16 +158,18 @@ jobs:
export VTDATAROOT="/tmp/"
source build.env
- set -x
+ set -exo pipefail
# run the tests however you normally do, then produce a JUnit XML file
eatmydata -- go run test.go -docker=false -follow -shard xb_recovery | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/codeql_analysis.yml b/.github/workflows/codeql_analysis.yml
index 5cef0644623..8bafc62213a 100644
--- a/.github/workflows/codeql_analysis.yml
+++ b/.github/workflows/codeql_analysis.yml
@@ -44,7 +44,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Get base dependencies
run: |
@@ -79,7 +79,7 @@ jobs:
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
- name: Building binaries
timeout-minutes: 30
diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml
index 7c2c75b2afe..52c90038680 100644
--- a/.github/workflows/create_release.yml
+++ b/.github/workflows/create_release.yml
@@ -20,7 +20,12 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
+
+ - name: Setup node
+ uses: actions/setup-node@v3
+ with:
+ node-version: '18.16.0'
- name: Tune the OS
run: |
diff --git a/.github/workflows/docker_test_cluster_10.yml b/.github/workflows/docker_test_cluster_10.yml
index 606ae666820..3ff9a2a6e74 100644
--- a/.github/workflows/docker_test_cluster_10.yml
+++ b/.github/workflows/docker_test_cluster_10.yml
@@ -5,7 +5,7 @@ jobs:
build:
name: Docker Test Cluster 10
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -54,7 +54,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/docker_test_cluster_25.yml b/.github/workflows/docker_test_cluster_25.yml
index 02f984e90b1..e01caf200b1 100644
--- a/.github/workflows/docker_test_cluster_25.yml
+++ b/.github/workflows/docker_test_cluster_25.yml
@@ -5,7 +5,7 @@ jobs:
build:
name: Docker Test Cluster 25
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -54,7 +54,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
diff --git a/.github/workflows/e2e_race.yml b/.github/workflows/e2e_race.yml
index 2c75ecf0cdb..0d773d936e4 100644
--- a/.github/workflows/e2e_race.yml
+++ b/.github/workflows/e2e_race.yml
@@ -5,7 +5,7 @@ jobs:
build:
name: End-to-End Test (Race)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
run: |
@@ -52,7 +52,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -87,4 +87,4 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 30
run: |
- make e2e_test_race
+ NOVTADMINBUILD=1 make e2e_test_race
diff --git a/.github/workflows/endtoend.yml b/.github/workflows/endtoend.yml
index c7d83929e82..1c0b5f00342 100644
--- a/.github/workflows/endtoend.yml
+++ b/.github/workflows/endtoend.yml
@@ -5,7 +5,7 @@ jobs:
build:
name: End-to-End Test
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
run: |
@@ -52,7 +52,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -78,10 +78,14 @@ jobs:
- name: Build
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
- make build
+ NOVTADMINBUILD=1 make build
- name: endtoend
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 30
run: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+
eatmydata -- tools/e2e_test_runner.sh
diff --git a/.github/workflows/local_example.yml b/.github/workflows/local_example.yml
index b90e9b463d0..bd6bbb247e3 100644
--- a/.github/workflows/local_example.yml
+++ b/.github/workflows/local_example.yml
@@ -4,11 +4,10 @@ permissions: read-all
jobs:
build:
- name: Local example using ${{ matrix.topo }} on ${{ matrix.os }}
- runs-on: ${{ matrix.os }}
+ name: Local example using ${{ matrix.topo }} on ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
strategy:
matrix:
- os: [ubuntu-22.04]
topo: [consul,etcd,k8s]
steps:
@@ -58,7 +57,13 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
+
+ - uses: actions/setup-node@v3
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
+ with:
+ # node-version should match package.json
+ node-version: '18.16.0'
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
diff --git a/.github/workflows/region_example.yml b/.github/workflows/region_example.yml
index b795eee02a2..5d58a6bb5bf 100644
--- a/.github/workflows/region_example.yml
+++ b/.github/workflows/region_example.yml
@@ -4,11 +4,10 @@ permissions: read-all
jobs:
build:
- name: Region Sharding example using ${{ matrix.topo }} on ${{ matrix.os }}
- runs-on: ${{ matrix.os }}
+ name: Region Sharding example using ${{ matrix.topo }} on ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
strategy:
matrix:
- os: [ubuntu-22.04]
topo: [etcd]
steps:
@@ -58,7 +57,13 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
+
+ - uses: actions/setup-node@v3
+ if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
+ with:
+ # node-version should match package.json
+ node-version: '18.16.0'
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.examples == 'true'
diff --git a/.github/workflows/static_checks_etc.yml b/.github/workflows/static_checks_etc.yml
index 478ce2da297..34714d00256 100644
--- a/.github/workflows/static_checks_etc.yml
+++ b/.github/workflows/static_checks_etc.yml
@@ -33,12 +33,6 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
- - name: Run FOSSA scan and upload build data
- if: steps.skip-workflow.outputs.skip-workflow == 'false'
- uses: fossa-contrib/fossa-action@v2
- with:
- fossa-api-key: 76d7483ea206d530d9452e44bffe7ba8
-
- name: Check for changes in Go files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: frouioui/paths-filter@main
@@ -102,12 +96,13 @@ jobs:
release_notes:
- 'changelog/**'
- './go/tools/releases/**'
+ - '.github/workflows/static_checks_etc.yml'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && (steps.changes.outputs.go_files == 'true' || steps.changes.outputs.parser_changes == 'true' || steps.changes.outputs.proto_changes == 'true')
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.go_files == 'true'
@@ -218,3 +213,4 @@ jobs:
echo 'Running `go run ./go/tools/releases/releases.go` on CI yields the following changes:'
echo "$output"
echo ""
+ exit 1
diff --git a/.github/workflows/unit_race.yml b/.github/workflows/unit_race.yml
index 3c45330cbe6..80121299139 100644
--- a/.github/workflows/unit_race.yml
+++ b/.github/workflows/unit_race.yml
@@ -10,7 +10,7 @@ jobs:
build:
name: Unit Test (Race)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
steps:
- name: Skip CI
run: |
@@ -57,7 +57,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
@@ -95,4 +95,9 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
timeout-minutes: 45
run: |
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+ export NOVTADMINBUILD=1
+
eatmydata -- make unit_test_race
diff --git a/.github/workflows/unit_test_mysql57.yml b/.github/workflows/unit_test_mysql57.yml
index a14b469c148..5c5b9c2a206 100644
--- a/.github/workflows/unit_test_mysql57.yml
+++ b/.github/workflows/unit_test_mysql57.yml
@@ -16,7 +16,7 @@ env:
jobs:
test:
name: Unit Test (mysql57)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
@@ -128,7 +135,7 @@ jobs:
make tools
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -137,19 +144,27 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
timeout-minutes: 30
run: |
+ set -exo pipefail
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+
+ export NOVTADMINBUILD=1
eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/unit_test_mysql80.yml b/.github/workflows/unit_test_mysql80.yml
index b969104eaa6..0427ef18158 100644
--- a/.github/workflows/unit_test_mysql80.yml
+++ b/.github/workflows/unit_test_mysql80.yml
@@ -16,7 +16,7 @@ env:
jobs:
test:
name: Unit Test (mysql80)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
@@ -36,6 +36,13 @@ jobs:
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
+ PR_DATA=$(curl \
+ -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
+ -H "Accept: application/vnd.github.v3+json" \
+ "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}")
+ draft=$(echo "$PR_DATA" | jq .draft -r)
+ echo "is_draft=${draft}" >> $GITHUB_OUTPUT
+
- name: Check out code
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: actions/checkout@v3
@@ -64,7 +71,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
@@ -125,7 +132,7 @@ jobs:
make tools
- name: Setup launchable dependencies
- if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && github.base_ref == 'main'
+ if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && github.base_ref == 'main'
run: |
# Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up
pip3 install --user launchable~=1.0 > /dev/null
@@ -134,19 +141,27 @@ jobs:
launchable verify || true
# Tell Launchable about the build you are producing and testing
- launchable record build --name "$GITHUB_RUN_ID" --source .
+ launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source .
- name: Run test
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true'
timeout-minutes: 30
run: |
+ set -exo pipefail
+ # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file
+ # which mustn't be more than 107 characters long.
+ export VTDATAROOT="/tmp/"
+
+ export NOVTADMINBUILD=1
eatmydata -- make unit_test | tee -a output.txt | go-junit-report -set-exit-code > report.xml
- - name: Print test output and Record test result in launchable
+ - name: Print test output and Record test result in launchable if PR is not a draft
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.unit_tests == 'true' && always()
run: |
- # send recorded tests to launchable
- launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then
+ # send recorded tests to launchable
+ launchable record tests --build "$GITHUB_RUN_ID" go-test . || true
+ fi
# print test output
cat output.txt
diff --git a/.github/workflows/update_golang_version.yml b/.github/workflows/update_golang_version.yml
index edb249ea74e..a01e3a815e0 100644
--- a/.github/workflows/update_golang_version.yml
+++ b/.github/workflows/update_golang_version.yml
@@ -9,19 +9,20 @@ permissions: read-all
jobs:
update_golang_version:
+ if: github.repository == 'vitessio/vitess'
permissions:
contents: write
pull-requests: write
strategy:
matrix:
- branch: [ main, release-16.0, release-15.0, release-14.0 ]
+ branch: [ main, release-17.0, release-16.0, release-15.0 ]
name: Update Golang Version
runs-on: ubuntu-latest
steps:
- name: Set up Go
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Check out code
uses: actions/checkout@v3
@@ -55,8 +56,10 @@ jobs:
# Check if the PR already exists, if it does then do not create new PR.
gh pr list -S "is:open [${{ matrix.branch }}] Upgrade the Golang version to go${go_version}" > out.txt 2>&1 | true
if [ -s out.txt ]; then
+ rm -f out.txt
exit 0
fi
+ rm -f out.txt
echo "create-pr=true" >> $GITHUB_OUTPUT
- name: Create Pull Request
diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
index 424a2aa25b3..9532995d49c 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml
@@ -13,7 +13,7 @@ jobs:
get_previous_release:
if: always()
name: Get Previous Release - Backups - E2E
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
@@ -34,7 +34,7 @@ jobs:
timeout-minutes: 60
if: always() && needs.get_previous_release.result == 'success'
name: Run Upgrade Downgrade Test - Backups - E2E
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
needs:
- get_previous_release
@@ -85,7 +85,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -114,7 +114,7 @@ jobs:
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }})
@@ -133,7 +133,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-other/
cp -R bin /tmp/vitess-build-other/
rm -Rf bin/*
@@ -153,7 +153,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-current/
cp -R bin /tmp/vitess-build-current/
diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
index 3e21997dfd0..cc8e3afb42a 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml
@@ -13,7 +13,7 @@ jobs:
get_next_release:
if: always()
name: Get Latest Release - Backups - E2E - Next Release
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
@@ -34,7 +34,7 @@ jobs:
timeout-minutes: 60
if: always() && needs.get_next_release.result == 'success'
name: Run Upgrade Downgrade Test - Backups - E2E - Next Release
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
needs:
- get_next_release
@@ -88,7 +88,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -117,7 +117,7 @@ jobs:
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }})
@@ -136,7 +136,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-other/
cp -R bin /tmp/vitess-build-other/
rm -Rf bin/*
@@ -156,7 +156,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-current/
cp -R bin /tmp/vitess-build-current/
diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual.yml b/.github/workflows/upgrade_downgrade_test_backups_manual.yml
index b00a9e38682..6789dda2067 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_manual.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_manual.yml
@@ -13,7 +13,7 @@ jobs:
get_previous_release:
if: always()
name: Get Previous Release - Backups - Manual
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
@@ -35,7 +35,7 @@ jobs:
timeout-minutes: 40
if: always() && (needs.get_previous_release.result == 'success')
name: Run Upgrade Downgrade Test - Backups - Manual
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
needs:
- get_previous_release
@@ -87,7 +87,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -134,7 +134,7 @@ jobs:
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the last release of Vitess
- name: Checkout to the other version's code (${{ needs.get_previous_release.outputs.previous_release }})
@@ -153,7 +153,7 @@ jobs:
timeout-minutes: 5
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-other/
cp -R bin /tmp/vitess-build-other/
rm -Rf bin/*
@@ -178,7 +178,7 @@ jobs:
timeout-minutes: 5
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-current/
cp -R bin /tmp/vitess-build-current/
diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
index 3d991a5a4bd..0120571a78e 100644
--- a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml
@@ -13,7 +13,7 @@ jobs:
get_next_release:
if: always()
name: Get Previous Release - Backups - Manual - Next Release
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
@@ -35,7 +35,7 @@ jobs:
timeout-minutes: 40
if: always() && (needs.get_next_release.result == 'success')
name: Run Upgrade Downgrade Test - Backups - Manual - Next Release
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
needs:
- get_next_release
@@ -90,7 +90,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -137,7 +137,7 @@ jobs:
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the next release of Vitess
- name: Checkout to the other version's code (${{ needs.get_next_release.outputs.next_release }})
@@ -156,7 +156,7 @@ jobs:
timeout-minutes: 5
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-other/
cp -R bin /tmp/vitess-build-other/
rm -Rf bin/*
@@ -181,7 +181,7 @@ jobs:
timeout-minutes: 5
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-current/
cp -R bin /tmp/vitess-build-current/
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
index 9aa7e8edd14..a3dc81f3723 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml
@@ -16,7 +16,7 @@ jobs:
get_previous_release:
if: always()
name: Get Previous Release - Query Serving (Queries)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
@@ -36,7 +36,7 @@ jobs:
upgrade_downgrade_test:
if: always() && (needs.get_previous_release.result == 'success')
name: Run Upgrade Downgrade Test - Query Serving (Queries)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
needs:
- get_previous_release
@@ -87,7 +87,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -132,7 +132,7 @@ jobs:
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }})
@@ -151,7 +151,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-other/
cp -R bin /tmp/vitess-build-other/
rm -Rf bin/*
@@ -171,7 +171,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-current/
cp -R bin /tmp/vitess-build-current/
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml
index f2838971d96..923c766e377 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml
@@ -16,7 +16,7 @@ jobs:
get_next_release:
if: always()
name: Get Latest Release - Query Serving (Queries) Next Release
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
@@ -36,7 +36,7 @@ jobs:
upgrade_downgrade_test:
if: always() && (needs.get_next_release.result == 'success')
name: Run Upgrade Downgrade Test - Query Serving (Queries) Next Release
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
needs:
- get_next_release
@@ -90,7 +90,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -135,7 +135,7 @@ jobs:
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }})
@@ -154,7 +154,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-other/
cp -R bin /tmp/vitess-build-other/
rm -Rf bin/*
@@ -174,7 +174,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-current/
cp -R bin /tmp/vitess-build-current/
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
index 09e9fac200d..14c8afaf87f 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml
@@ -16,7 +16,7 @@ jobs:
get_previous_release:
if: always()
name: Get Previous Release - Query Serving (Schema)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
@@ -36,7 +36,7 @@ jobs:
upgrade_downgrade_test:
if: always() && (needs.get_previous_release.result == 'success')
name: Run Upgrade Downgrade Test - Query Serving (Schema)
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
needs:
- get_previous_release
@@ -87,7 +87,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -132,7 +132,7 @@ jobs:
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }})
@@ -151,7 +151,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-other/
cp -R bin /tmp/vitess-build-other/
rm -Rf bin/*
@@ -171,7 +171,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-current/
cp -R bin /tmp/vitess-build-current/
diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml
index b3b76683a00..f22ece10010 100644
--- a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml
+++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml
@@ -16,7 +16,7 @@ jobs:
get_next_release:
if: always()
name: Get Latest Release - Query Serving (Schema) Next Release
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
@@ -36,7 +36,7 @@ jobs:
upgrade_downgrade_test:
if: always() && (needs.get_next_release.result == 'success')
name: Run Upgrade Downgrade Test - Query Serving (Schema) Next Release
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
needs:
- get_next_release
@@ -90,7 +90,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -135,7 +135,7 @@ jobs:
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }})
@@ -154,7 +154,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-other/
cp -R bin /tmp/vitess-build-other/
rm -Rf bin/*
@@ -174,7 +174,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-current/
cp -R bin /tmp/vitess-build-current/
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml
index 34d34965dfd..82d6f267856 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml
@@ -16,7 +16,7 @@ jobs:
get_next_release:
if: always()
name: Get Latest Release - Reparent New Vtctl
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
@@ -36,7 +36,7 @@ jobs:
upgrade_downgrade_test:
if: always() && (needs.get_next_release.result == 'success')
name: Run Upgrade Downgrade Test - Reparent New Vtctl
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
needs:
- get_next_release
@@ -90,7 +90,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -135,7 +135,7 @@ jobs:
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }})
@@ -154,7 +154,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-other/
cp -R bin /tmp/vitess-build-other/
rm -Rf bin/*
@@ -174,7 +174,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-current/
cp -R bin /tmp/vitess-build-current/
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
index a5f0cf682d3..c5b6c964124 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml
@@ -16,7 +16,7 @@ jobs:
get_next_release:
if: always()
name: Get Latest Release - Reparent New VTTablet
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
outputs:
next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
@@ -36,7 +36,7 @@ jobs:
upgrade_downgrade_test:
if: always() && (needs.get_next_release.result == 'success')
name: Run Upgrade Downgrade Test - Reparent New VTTablet
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
needs:
- get_next_release
@@ -90,7 +90,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -135,7 +135,7 @@ jobs:
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the next release of Vitess
- name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }})
@@ -154,7 +154,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-other/
cp -R bin /tmp/vitess-build-other/
rm -Rf bin/*
@@ -174,7 +174,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-current/
cp -R bin /tmp/vitess-build-current/
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
index 86760edab8b..c4391efdef5 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml
@@ -16,7 +16,7 @@ jobs:
get_previous_release:
if: always()
name: Get Previous Release - Reparent Old Vtctl
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
@@ -36,7 +36,7 @@ jobs:
upgrade_downgrade_test:
if: always() && (needs.get_previous_release.result == 'success')
name: Run Upgrade Downgrade Test - Reparent Old Vtctl
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
needs:
- get_previous_release
@@ -87,7 +87,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -132,7 +132,7 @@ jobs:
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }})
@@ -151,7 +151,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-other/
cp -R bin /tmp/vitess-build-other/
rm -Rf bin/*
@@ -171,7 +171,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-current/
cp -R bin /tmp/vitess-build-current/
diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
index 44e2e54909b..f3ffcaa2d17 100644
--- a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
+++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml
@@ -16,7 +16,7 @@ jobs:
get_previous_release:
if: always()
name: Get Previous Release - Reparent Old VTTablet
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
outputs:
previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }}
@@ -36,7 +36,7 @@ jobs:
upgrade_downgrade_test:
if: always() && (needs.get_previous_release.result == 'success')
name: Run Upgrade Downgrade Test - Reparent Old VTTablet
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-16cores-1
needs:
- get_previous_release
@@ -87,7 +87,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@v4
with:
- go-version: 1.20.5
+ go-version: 1.21.3
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
@@ -132,7 +132,7 @@ jobs:
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get update
- sudo apt-get install percona-xtrabackup-24
+ sudo apt-get install -y percona-xtrabackup-24
# Checkout to the last release of Vitess
- name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }})
@@ -151,7 +151,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-other/
cp -R bin /tmp/vitess-build-other/
rm -Rf bin/*
@@ -171,7 +171,7 @@ jobs:
timeout-minutes: 10
run: |
source build.env
- make build
+ NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-current/
cp -R bin /tmp/vitess-build-current/
diff --git a/.github/workflows/vtadmin_web_build.yml b/.github/workflows/vtadmin_web_build.yml
index 441561447ee..24ade4d9227 100644
--- a/.github/workflows/vtadmin_web_build.yml
+++ b/.github/workflows/vtadmin_web_build.yml
@@ -16,7 +16,7 @@ permissions: read-all
jobs:
build:
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
run: |
@@ -42,7 +42,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false'
with:
# node-version should match package.json
- node-version: '16.19.0'
+ node-version: '18.16.0'
- name: Install dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false'
diff --git a/.github/workflows/vtadmin_web_lint.yml b/.github/workflows/vtadmin_web_lint.yml
index fe88053ff5a..055e1934fb0 100644
--- a/.github/workflows/vtadmin_web_lint.yml
+++ b/.github/workflows/vtadmin_web_lint.yml
@@ -42,7 +42,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false'
with:
# node-version should match package.json
- node-version: '16.19.0'
+ node-version: '18.16.0'
- name: Install dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false'
diff --git a/.github/workflows/vtadmin_web_unit_tests.yml b/.github/workflows/vtadmin_web_unit_tests.yml
index cab00c8dea9..1efa474fde3 100644
--- a/.github/workflows/vtadmin_web_unit_tests.yml
+++ b/.github/workflows/vtadmin_web_unit_tests.yml
@@ -16,7 +16,7 @@ permissions: read-all
jobs:
unit-tests:
- runs-on: ubuntu-22.04
+ runs-on: gh-hosted-runners-4cores-1
steps:
- name: Skip CI
run: |
@@ -42,7 +42,7 @@ jobs:
if: steps.skip-workflow.outputs.skip-workflow == 'false'
with:
# node-version should match package.json
- node-version: '16.19.0'
+ node-version: '18.16.0'
- name: Install dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false'
diff --git a/.golangci.yml b/.golangci.yml
index 50bf68f4bfb..9c674953a76 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,8 +1,6 @@
run:
- go: 1.19
+ go: 1.20
timeout: 10m
- skip-dirs:
- - go/vt/topo/k8stopo/client
linters-settings:
errcheck:
diff --git a/GOVERNANCE.md b/GOVERNANCE.md
index ffe72221dce..8b408d8ba55 100644
--- a/GOVERNANCE.md
+++ b/GOVERNANCE.md
@@ -55,13 +55,13 @@ A maintainer is not allowed to merge their change without approval from other ma
Anyone can become a maintainer; there are no special requirements, other than to have shown a willingness and ability to participate in the project as a team player. Typically, a potential maintainer will need to show that they have an understanding of the project, its objectives and its strategy. They will also have provided valuable contributions to the project over a period of time.
-New maintainers can be nominated by any existing maintainer. Once they have been nominated, there will be a vote by the steering committee. Maintainer voting is one of the few activities that takes place on the project’s private management list. This is to allow committee members to freely express their opinions about a nominee without causing embarrassment. Once the vote has been held, the aggregated voting results are published on the public mailing list. The nominee is entitled to request an explanation of any ‘no’ votes against them, regardless of the outcome of the vote. This explanation will be provided by the Steering Committee Chair (see below) and will be anonymous and constructive in nature.
+New maintainers can be nominated by any existing maintainer. Once they have been nominated, there will be a vote by the maintainer team to decide whether to accept or reject the nomination.
-Nominees may decline their appointment as a maintainer. However, this is unusual, as the project does not expect any specific time or resource commitment from its community members. The intention behind the role of maintainer is to allow people to contribute to the project more easily, not to tie them in to the project in any formal way.
+Nominees may decline their appointment as a maintainer. The project does not expect any specific time or resource commitment from its community members, however it is expected that maintainers are evangelists for the project.
-It is important to recognise that maintainer-ship is a privilege, not a right. That privilege must be earned and once earned it can be removed by the Steering Committee for conduct inconsistent with the [Guiding Principles](https://github.com/vitessio/vitess/blob/main/GUIDING_PRINCIPLES.md) or if they drop below a level of commitment and engagement required to be a maintainer, as determined by the Steering Committee. The Steering Committee also reserves the right to remove a person for any other reason inconsistent with the goals of the project.
+It is important to recognise that maintainer-ship is a privilege, not a right. That privilege must be earned and once earned it can be removed by the [Steering Committee](https://github.com/vitessio/vitess/blob/main/STEERING.md) for conduct inconsistent with the [Guiding Principles](https://github.com/vitessio/vitess/blob/main/GUIDING_PRINCIPLES.md) or if they drop below a level of commitment and engagement required to be a maintainer, as determined by the Steering Committee. The Steering Committee also reserves the right to remove a person for any other reason inconsistent with the goals of the project.
-A maintainer who shows an above-average level of contribution to the project, particularly with respect to its strategic direction and long-term health, may be nominated to become a member of the Steering Committee. This role is described in the [Steering Committee document](https://github.com/vitessio/vitess/blob/main/STEERING.md).
+A maintainer who shows an above-average level of contribution to the project, particularly with respect to its strategic direction and long-term health, may be nominated to become a member of the Steering Committee.
# Support
diff --git a/Makefile b/Makefile
index a063d405ba9..249dafa6a47 100644
--- a/Makefile
+++ b/Makefile
@@ -61,6 +61,10 @@ ifdef VT_EXTRA_BUILD_FLAGS
export EXTRA_BUILD_FLAGS := $(VT_EXTRA_BUILD_FLAGS)
endif
+ifdef VT_EXTRA_BUILD_LDFLAGS
+export EXTRA_BUILD_LDFLAGS := $(VT_EXTRA_BUILD_LDFLAGS)
+endif
+
# This should be the root of the vitess Git directory.
ifndef VTROOT
export VTROOT=${PWD}
@@ -76,7 +80,7 @@ ifndef NOBANNER
endif
bash ./build.env
go build -trimpath $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) \
- -ldflags "$(shell tools/build_version_flags.sh)" \
+ -ldflags "$(EXTRA_BUILD_LDFLAGS) $(shell tools/build_version_flags.sh)" \
-o ${VTROOTBIN} ./go/...
# build the vitess binaries statically
@@ -89,8 +93,12 @@ endif
# Binaries will be placed in ${VTROOTBIN}.
CGO_ENABLED=0 go build \
-trimpath $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) \
- -ldflags "$(shell tools/build_version_flags.sh)" \
+ -ldflags "$(EXTRA_BUILD_LDFLAGS) $(shell tools/build_version_flags.sh)" \
-o ${VTROOTBIN} ./go/...
+ifndef NOVTADMINBUILD
+ echo "Building VTAdmin Web, disable VTAdmin build by setting 'NOVTADMINBUILD'"
+ PREFIX="" ./web/vtadmin/build.sh
+endif
# cross-build can be used to cross-compile Vitess client binaries
# Outside of select client binaries (namely vtctlclient & vtexplain), cross-compiled Vitess Binaries are not recommended for production deployments
@@ -107,7 +115,7 @@ endif
mkdir -p ${VTROOTBIN}/${GOOS}_${GOARCH}
CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go build \
-trimpath $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) \
- -ldflags "$(shell tools/build_version_flags.sh)" \
+ -ldflags "$(EXTRA_BUILD_LDFLAGS) $(shell tools/build_version_flags.sh)" \
-o ${VTROOTBIN}/${GOOS}_${GOARCH} ./go/...
@if [ ! -x "${VTROOTBIN}/${GOOS}_${GOARCH}/vttablet" ]; then \
@@ -121,7 +129,7 @@ endif
bash ./build.env
go build -trimpath \
$(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) \
- -ldflags "$(shell tools/build_version_flags.sh)" \
+ -ldflags "$(EXTRA_BUILD_LDFLAGS) $(shell tools/build_version_flags.sh)" \
-gcflags -'N -l' \
-o ${VTROOTBIN} ./go/...
@@ -243,7 +251,6 @@ install_protoc-gen-go:
PROTO_SRCS = $(wildcard proto/*.proto)
PROTO_SRC_NAMES = $(basename $(notdir $(PROTO_SRCS)))
-PROTO_GO_OUTS = $(foreach name, $(PROTO_SRC_NAMES), go/vt/proto/$(name)/$(name).pb.go)
# This rule rebuilds all the go files from the proto definitions for gRPC.
proto: $(PROTO_GO_OUTS) vtadmin_web_proto_types
@@ -256,9 +263,10 @@ $(PROTO_GO_OUTS): minimaltools install_protoc-gen-go proto/*.proto
--go_out=. --plugin protoc-gen-go="${VTROOTBIN}/protoc-gen-go" \
--go-grpc_out=. --plugin protoc-gen-go-grpc="${VTROOTBIN}/protoc-gen-go-grpc" \
--go-vtproto_out=. --plugin protoc-gen-go-vtproto="${VTROOTBIN}/protoc-gen-go-vtproto" \
- --go-vtproto_opt=features=marshal+unmarshal+size+pool \
+ --go-vtproto_opt=features=marshal+unmarshal+size+pool+clone \
--go-vtproto_opt=pool=vitess.io/vitess/go/vt/proto/query.Row \
--go-vtproto_opt=pool=vitess.io/vitess/go/vt/proto/binlogdata.VStreamRowsResponse \
+ --go-vtproto_opt=pool=vitess.io/vitess/go/vt/proto/binlogdata.VStreamTablesResponse \
-I${PWD}/dist/vt-protoc-21.3/include:proto $(PROTO_SRCS)
cp -Rf vitess.io/vitess/go/vt/proto/* go/vt/proto
rm -rf vitess.io/vitess/go/vt/proto/
@@ -269,7 +277,7 @@ $(PROTO_GO_OUTS): minimaltools install_protoc-gen-go proto/*.proto
# This rule builds the bootstrap images for all flavors.
DOCKER_IMAGES_FOR_TEST = mysql57 mysql80 percona57 percona80
DOCKER_IMAGES = common $(DOCKER_IMAGES_FOR_TEST)
-BOOTSTRAP_VERSION=18.0
+BOOTSTRAP_VERSION=22.1
ensure_bootstrap_version:
find docker/ -type f -exec sed -i "s/^\(ARG bootstrap_version\)=.*/\1=${BOOTSTRAP_VERSION}/" {} \;
sed -i 's/\(^.*flag.String(\"bootstrap-version\",\) *\"[^\"]\+\"/\1 \"${BOOTSTRAP_VERSION}\"/' test.go
@@ -332,6 +340,9 @@ DOCKER_LITE_TARGETS = $(addprefix docker_lite_,$(DOCKER_LITE_SUFFIX))
$(DOCKER_LITE_TARGETS): docker_lite_%:
${call build_docker_image,docker/lite/Dockerfile.$*,vitess/lite:$*}
+docker_lite_push:
+ for i in $(DOCKER_LITE_SUFFIX); do echo "pushing lite image: $$i"; docker push vitess/lite:$$i || exit 1; done
+
docker_lite_all: docker_lite $(DOCKER_LITE_TARGETS)
docker_local:
@@ -384,69 +395,11 @@ tools:
minimaltools:
echo $$(date): Installing minimal dependencies
- BUILD_CHROME=0 BUILD_JAVA=0 BUILD_CONSUL=0 ./bootstrap.sh
+ BUILD_JAVA=0 BUILD_CONSUL=0 ./bootstrap.sh
dependency_check:
./tools/dependency_check.sh
-install_k8s-code-generator: tools/tools.go go.mod
- go install k8s.io/code-generator/cmd/deepcopy-gen
- go install k8s.io/code-generator/cmd/client-gen
- go install k8s.io/code-generator/cmd/lister-gen
- go install k8s.io/code-generator/cmd/informer-gen
-
-DEEPCOPY_GEN=$(VTROOTBIN)/deepcopy-gen
-CLIENT_GEN=$(VTROOTBIN)/client-gen
-LISTER_GEN=$(VTROOTBIN)/lister-gen
-INFORMER_GEN=$(VTROOTBIN)/informer-gen
-
-GEN_BASE_DIR ?= vitess.io/vitess/go/vt/topo/k8stopo
-
-client_go_gen: install_k8s-code-generator
- echo $$(date): Regenerating client-go code
- # Delete and re-generate the deepcopy types
- find $(VTROOT)/go/vt/topo/k8stopo/apis/topo/v1beta1 -name "zz_generated.deepcopy.go" -delete
-
- # We output to ./ and then copy over the generated files to the appropriate path
- # This is done so we don't have rely on the repository being cloned to `$GOPATH/src/vitess.io/vitess`
-
- $(DEEPCOPY_GEN) -o ./ \
- --input-dirs $(GEN_BASE_DIR)/apis/topo/v1beta1 \
- -O zz_generated.deepcopy \
- --bounding-dirs $(GEN_BASE_DIR)/apis \
- --go-header-file ./go/vt/topo/k8stopo/boilerplate.go.txt
-
- # Delete existing code
- rm -rf go/vt/topo/k8stopo/client
-
- # Generate clientset
- $(CLIENT_GEN) -o ./ \
- --clientset-name versioned \
- --input-base $(GEN_BASE_DIR)/apis \
- --input 'topo/v1beta1' \
- --output-package $(GEN_BASE_DIR)/client/clientset \
- --fake-clientset=true \
- --go-header-file ./go/vt/topo/k8stopo/boilerplate.go.txt
-
- # Generate listers
- $(LISTER_GEN) -o ./ \
- --input-dirs $(GEN_BASE_DIR)/apis/topo/v1beta1 \
- --output-package $(GEN_BASE_DIR)/client/listers \
- --go-header-file ./go/vt/topo/k8stopo/boilerplate.go.txt
-
- # Generate informers
- $(INFORMER_GEN) -o ./ \
- --input-dirs $(GEN_BASE_DIR)/apis/topo/v1beta1 \
- --output-package $(GEN_BASE_DIR)/client/informers \
- --versioned-clientset-package $(GEN_BASE_DIR)/client/clientset/versioned \
- --listers-package $(GEN_BASE_DIR)/client/listers \
- --go-header-file ./go/vt/topo/k8stopo/boilerplate.go.txt
-
- # Move and cleanup
- mv vitess.io/vitess/go/vt/topo/k8stopo/client go/vt/topo/k8stopo/
- mv vitess.io/vitess/go/vt/topo/k8stopo/apis/topo/v1beta1/zz_generated.deepcopy.go go/vt/topo/k8stopo/apis/topo/v1beta1/zz_generated.deepcopy.go
- rm -rf vitess.io/vitess/go/vt/topo/k8stopo/
-
vtadmin_web_install:
cd web/vtadmin && npm install
diff --git a/bootstrap.sh b/bootstrap.sh
index de4efcf5363..f95302ea771 100755
--- a/bootstrap.sh
+++ b/bootstrap.sh
@@ -25,7 +25,6 @@ source ./dev.env
BUILD_JAVA=${BUILD_JAVA:-1}
BUILD_CONSUL=${BUILD_CONSUL:-1}
-BUILD_CHROME=${BUILD_CHROME:-1}
VITESS_RESOURCES_DOWNLOAD_BASE_URL="https://github.com/vitessio/vitess-resources/releases/download"
VITESS_RESOURCES_RELEASE="v4.0"
@@ -171,35 +170,6 @@ install_etcd() {
ln -snf "$dist/etcd-${version}-${platform}-${target}/etcdctl" "$VTROOT/bin/etcdctl"
}
-
-# Download and install k3s, link k3s binary into our root
-install_k3s() {
- local version="$1"
- local dist="$2"
- case $(uname) in
- Linux) local platform=linux;;
- *) echo "WARNING: unsupported platform. K3s only supports running on Linux, the k8s topology will not be available for local examples."; return;;
- esac
-
- case $(get_arch) in
- aarch64) local target="-arm64";;
- x86_64) local target="";;
- arm64) local target="-arm64";;
- *) echo "WARNING: unsupported architecture, the k8s topology will not be available for local examples."; return;;
- esac
-
- file="k3s${target}"
-
- local dest="$dist/k3s${target}-${version}-${platform}"
- # This is how we'd download directly from source:
- # download_url=https://github.com/rancher/k3s/releases/download
- # wget -O $dest "$download_url/$version/$file"
- "${VTROOT}/tools/wget-retry" -O $dest "${VITESS_RESOURCES_DOWNLOAD_URL}/$file-$version"
- chmod +x $dest
- ln -snf $dest "$VTROOT/bin/k3s"
-}
-
-
# Download and install consul, link consul binary into our root.
install_consul() {
local version="$1"
@@ -227,37 +197,6 @@ install_consul() {
}
-# Download chromedriver
-install_chromedriver() {
- local version="$1"
- local dist="$2"
-
- case $(uname) in
- Linux) local platform=linux;;
- *) echo "Platform not supported for vtctl-web tests. Skipping chromedriver install."; return;;
- esac
-
- if [ "$(arch)" == "aarch64" ] ; then
- os=$(cat /etc/*release | grep "^ID=" | cut -d '=' -f 2)
- case $os in
- ubuntu|debian)
- sudo apt-get update -y && sudo apt install -y --no-install-recommends unzip libglib2.0-0 libnss3 libx11-6
- ;;
- centos|fedora)
- sudo yum update -y && yum install -y libX11 unzip wget
- ;;
- esac
- echo "For Arm64, using prebuilt binary from electron (https://github.com/electron/electron/) of version 76.0.3809.126"
- "${VTROOT}/tools/wget-retry" https://github.com/electron/electron/releases/download/v6.0.3/chromedriver-v6.0.3-linux-arm64.zip
- unzip -o -q chromedriver-v6.0.3-linux-arm64.zip -d "$dist"
- rm chromedriver-v6.0.3-linux-arm64.zip
- else
- "${VTROOT}/tools/wget-retry" "https://chromedriver.storage.googleapis.com/$version/chromedriver_linux64.zip"
- unzip -o -q chromedriver_linux64.zip -d "$dist"
- rm chromedriver_linux64.zip
- fi
-}
-
# Download and install toxiproxy, link toxiproxy binary into our root.
install_toxiproxy() {
local version="$1"
@@ -299,19 +238,11 @@ install_all() {
# etcd
install_dep "etcd" "v3.5.6" "$VTROOT/dist/etcd" install_etcd
- # k3s
- command -v k3s || install_dep "k3s" "v1.0.0" "$VTROOT/dist/k3s" install_k3s
-
# consul
if [ "$BUILD_CONSUL" == 1 ] ; then
install_dep "Consul" "1.11.4" "$VTROOT/dist/consul" install_consul
fi
- # chromedriver
- if [ "$BUILD_CHROME" == 1 ] ; then
- install_dep "chromedriver" "90.0.4430.24" "$VTROOT/dist/chromedriver" install_chromedriver
- fi
-
# toxiproxy
install_dep "toxiproxy" "v2.5.0" "$VTROOT/dist/toxiproxy" install_toxiproxy
diff --git a/build.env b/build.env
index 038f791c3c3..b9e44331e65 100755
--- a/build.env
+++ b/build.env
@@ -17,7 +17,7 @@
source ./tools/shell_functions.inc
go version >/dev/null 2>&1 || fail "Go is not installed or is not in \$PATH. See https://vitess.io/contributing/build-from-source for install instructions."
-goversion_min 1.20.5 || echo "Go version reported: `go version`. Version 1.20.5+ recommended. See https://vitess.io/contributing/build-from-source for install instructions."
+goversion_min 1.21.3 || echo "Go version reported: `go version`. Version 1.21.3+ recommended. See https://vitess.io/contributing/build-from-source for install instructions."
mkdir -p dist
mkdir -p bin
diff --git a/changelog/15.0/15.0.2/summary.md b/changelog/15.0/15.0.2/summary.md
index 6f3346efa47..b12a97879a5 100644
--- a/changelog/15.0/15.0.2/summary.md
+++ b/changelog/15.0/15.0.2/summary.md
@@ -3,7 +3,7 @@
### Upgrade to `go1.18.9`
Vitess `v15.0.2` now runs on `go1.18.9`.
-The patch release of Go, `go1.18.9`, was one of the main reasons for this release as it includes an important security fixe to `net/http` package, which is use extensively by Vitess.
+The patch release of Go, `go1.18.9`, was one of the main reasons for this release as it includes an important security fix to `net/http` package, which is used extensively by Vitess.
Below is a summary of this patch release. You can learn more [here](https://groups.google.com/g/golang-announce/c/L_3rmdT0BMU).
> go1.18.9 (released 2022-12-06) includes security fixes to the net/http and os packages, as well as bug fixes to cgo, the compiler, the runtime, and the crypto/x509 and os/exec packages.
diff --git a/changelog/15.0/15.0.4/changelog.md b/changelog/15.0/15.0.4/changelog.md
new file mode 100644
index 00000000000..f70fd1090a7
--- /dev/null
+++ b/changelog/15.0/15.0.4/changelog.md
@@ -0,0 +1,61 @@
+# Changelog of Vitess v15.0.4
+
+### Bug fixes
+#### Build/CI
+ * [release-15.0] Small fixes to the auto-upgrade golang tool (#12838) [#12847](https://github.com/vitessio/vitess/pull/12847)
+ * [release-15.0] Add timeout to golangci-lint and bump its version (#12852) [#12853](https://github.com/vitessio/vitess/pull/12853)
+ * [release-15.0] Remove recent golangci-lint version bump [#12910](https://github.com/vitessio/vitess/pull/12910)
+#### Cluster management
+ * [release-15.0] Prevent resetting replication every time we set replication source (#13377) [#13393](https://github.com/vitessio/vitess/pull/13393)
+ * [release-15.0] Don't run any reparent commands if the host is empty (#13396) [#13403](https://github.com/vitessio/vitess/pull/13403)
+ * [release-15.0] ignore all error for views in engine reload (#13590) [#13592](https://github.com/vitessio/vitess/pull/13592)
+#### Examples
+ * [release-15.0] `examples/compose`: fix `consul:latest` error w/`docker-compose up -d` (#13468) [#13471](https://github.com/vitessio/vitess/pull/13471)
+#### Online DDL
+ * v15 backport: vitess Online DDL atomic cut-over [#13376](https://github.com/vitessio/vitess/pull/13376)
+#### Query Serving
+ * [release-15.0] planbuilder bugfix - do not push aggregations into derived tables [#12824](https://github.com/vitessio/vitess/pull/12824)
+ * [release-15.0] Fix `vtgate_schema_tracker` flaky tests (#12780) [#12850](https://github.com/vitessio/vitess/pull/12850)
+ * [release-15.0] fix: union distinct between unsharded route and sharded join (#12968) [#12982](https://github.com/vitessio/vitess/pull/12982)
+ * gen4 planner: allow last_insert_id with arguments (15.0) [#13035](https://github.com/vitessio/vitess/pull/13035)
+ * [release-15.0] Fix the resilientQuery to give correct results during initialization (#13080) [#13086](https://github.com/vitessio/vitess/pull/13086)
+ * [release-15.0] Remove indentation limit in the sqlparser (#13158) [#13167](https://github.com/vitessio/vitess/pull/13167)
+ * [release-15.0] Fix: TabletServer ReserveBeginExecute to return transaction ID on error (#13193) [#13196](https://github.com/vitessio/vitess/pull/13196)
+ * [15.0] Fix: errant GTID in health streamer (#13184) [#13226](https://github.com/vitessio/vitess/pull/13226)
+#### Schema Tracker
+ * [release-15.0] Ignore error while reading table data in Schema.Engine reload (#13421) [#13425](https://github.com/vitessio/vitess/pull/13425)
+ * Backport v15: schema.Reload(): ignore column reading errors for views only, error for tables #13442 [#13457](https://github.com/vitessio/vitess/pull/13457)
+### Enhancement
+#### Build/CI
+ * Use go1.20.3 in the upgrade downgrade tests [#12839](https://github.com/vitessio/vitess/pull/12839)
+ * [release-15.0] Set the number of threads for release notes generation with a flag [#13315](https://github.com/vitessio/vitess/pull/13315)
+#### General
+ * Use `go1.20.4` on `release-15.0` upgrade test [#13071](https://github.com/vitessio/vitess/pull/13071)
+#### Query Serving
+ * [release-15.0] planner fix: scoping rules for JOIN ON expression inside a subquery [#12890](https://github.com/vitessio/vitess/pull/12890)
+### Internal Cleanup
+#### Operator
+ * Use vitess-operator `v2.8.4` in the examples [#12993](https://github.com/vitessio/vitess/pull/12993)
+#### VTorc
+ * [release-15.0] Remove excessive logging in VTOrc APIs (#13459) [#13463](https://github.com/vitessio/vitess/pull/13463)
+### Performance
+#### TabletManager
+ * [release-15.0] BaseShowTablesWithSizes: optimize MySQL 8.0 query (#13375) [#13388](https://github.com/vitessio/vitess/pull/13388)
+### Release
+#### Build/CI
+ * [release-15.0] Optimize release notes generation to use GitHub Milestones (#13398) [#13620](https://github.com/vitessio/vitess/pull/13620)
+#### Documentation
+ * Prepare release note `v15.0.4` [#13619](https://github.com/vitessio/vitess/pull/13619)
+### Testing
+#### Build/CI
+ * [release-15.0] fakedbclient: Add locking to avoid races (#12814) [#12821](https://github.com/vitessio/vitess/pull/12821)
+#### Cluster management
+ * [release-15.0] Flaky tests: Fix wrangler tests (#13568) [#13570](https://github.com/vitessio/vitess/pull/13570)
+#### General
+ * [release-15.0] Update Upgrade/Downgrade tests to use `go1.20.5` [#13271](https://github.com/vitessio/vitess/pull/13271)
+#### Query Serving
+ * [release-15.0] Fix benchmarks in `plan_test.go` (#13096) [#13125](https://github.com/vitessio/vitess/pull/13125)
+ * [release-15.0] Fix `TestGatewayBufferingWhileReparenting` flakiness (#13469) [#13502](https://github.com/vitessio/vitess/pull/13502)
+#### VTorc
+ * [release-15.0]: Fix flakiness in VTOrc tests (#13489) [#13529](https://github.com/vitessio/vitess/pull/13529)
+
diff --git a/changelog/15.0/15.0.4/release_notes.md b/changelog/15.0/15.0.4/release_notes.md
new file mode 100644
index 00000000000..38fa25f9c78
--- /dev/null
+++ b/changelog/15.0/15.0.4/release_notes.md
@@ -0,0 +1,7 @@
+# Release of Vitess v15.0.4
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/15.0/15.0.4/changelog.md).
+
+The release includes 33 merged Pull Requests.
+
+Thanks to all our contributors: @GuptaManan100, @app/vitess-bot, @frouioui, @harshit-gangal, @shlomi-noach, @systay
+
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/extra_args b/changelog/15.0/15.0.4/summary.md
similarity index 100%
rename from go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/extra_args
rename to changelog/15.0/15.0.4/summary.md
diff --git a/changelog/15.0/README.md b/changelog/15.0/README.md
index 332f1a7eb24..17807db2ebc 100644
--- a/changelog/15.0/README.md
+++ b/changelog/15.0/README.md
@@ -1,5 +1,9 @@
## v15.0
The dedicated team for this release can be found [here](team.md).
+* **[15.0.4](15.0.4)**
+ * [Changelog](15.0.4/changelog.md)
+ * [Release Notes](15.0.4/release_notes.md)
+
* **[15.0.3](15.0.3)**
* [Changelog](15.0.3/changelog.md)
* [Release Notes](15.0.3/release_notes.md)
diff --git a/changelog/16.0/16.0.3/changelog.md b/changelog/16.0/16.0.3/changelog.md
new file mode 100644
index 00000000000..3f43d9b6049
--- /dev/null
+++ b/changelog/16.0/16.0.3/changelog.md
@@ -0,0 +1,67 @@
+# Changelog of Vitess v16.0.3
+
+### Bug fixes
+#### Cluster management
+ * [release-16.0] Prevent resetting replication every time we set replication source (#13377) [#13392](https://github.com/vitessio/vitess/pull/13392)
+ * [release-16.0] Don't run any reparent commands if the host is empty (#13396) [#13402](https://github.com/vitessio/vitess/pull/13402)
+ * [release-16.0] Upgrade-Downgrade Fix: Schema-initialization stuck on semi-sync ACKs while upgrading (#13411) [#13441](https://github.com/vitessio/vitess/pull/13441)
+ * [release-16.0] Flaky tests: Fix race in memory topo (#13559) [#13576](https://github.com/vitessio/vitess/pull/13576)
+ * [release-16.0] ignore all error for views in engine reload (#13590) [#13593](https://github.com/vitessio/vitess/pull/13593)
+ * [release-16.0] check keyspace snapshot time if none specified for backup restores (#13557) [#13634](https://github.com/vitessio/vitess/pull/13634)
+#### Examples
+ * [release-16.0] `examples/compose`: fix `consul:latest` error w/`docker-compose up -d` (#13468) [#13472](https://github.com/vitessio/vitess/pull/13472)
+#### Operator
+ * [release-16.0] Upgrade mysqld memory limits to 1024Mi (#13122) [#13204](https://github.com/vitessio/vitess/pull/13204)
+#### Query Serving
+ * [release-16.0] Fix the resilientQuery to give correct results during initialization (#13080) [#13087](https://github.com/vitessio/vitess/pull/13087)
+ * [16.0] evalengine: TypeOf for Columns should only use value type when we have a value [#13154](https://github.com/vitessio/vitess/pull/13154)
+ * [release-16.0] Remove indentation limit in the sqlparser (#13158) [#13166](https://github.com/vitessio/vitess/pull/13166)
+ * Fix: errant GTID in health streamer [#13184](https://github.com/vitessio/vitess/pull/13184)
+ * [16.0] Fix: TabletServer ReserveBeginExecute to return transaction ID on error [#13193](https://github.com/vitessio/vitess/pull/13193)
+ * [release-16.0] Bug fix: SQL queries erroring with message `unknown aggregation random` (#13330) [#13334](https://github.com/vitessio/vitess/pull/13334)
+ * [release-16.0] ignore ongoing backfill vindex from routing selection (#13523) [#13607](https://github.com/vitessio/vitess/pull/13607)
+#### Schema Tracker
+ * [release-16.0] Ignore error while reading table data in Schema.Engine reload (#13421) [#13424](https://github.com/vitessio/vitess/pull/13424)
+ * Backport v16: schema.Reload(): ignore column reading errors for views only, error for tables #13442 [#13456](https://github.com/vitessio/vitess/pull/13456)
+#### TabletManager
+ * [release-16.0] mysqlctl: Correctly encode database and table names (#13312) [#13323](https://github.com/vitessio/vitess/pull/13323)
+#### VReplication
+ * [release-16.0] VReplication: Do not delete sharded target vschema table entries on Cancel (#13146) [#13155](https://github.com/vitessio/vitess/pull/13155)
+ * [release-16.0] VReplication: Pass on --keep_routing_rules flag value for Cancel action (#13171) [#13194](https://github.com/vitessio/vitess/pull/13194)
+ * [release-16.0] VReplication: Fix VDiff2 DeleteByUUID Query (#13255) [#13282](https://github.com/vitessio/vitess/pull/13282)
+ * [release-16.0] VReplication: Ensure ROW events are sent within a transaction (#13547) [#13580](https://github.com/vitessio/vitess/pull/13580)
+### CI/Build
+#### General
+ * [release-16.0] Upgrade the Golang version to `go1.20.4` [#13053](https://github.com/vitessio/vitess/pull/13053)
+### Documentation
+#### Documentation
+ * [release-16.0] update link for reparenting guide (#13350) [#13356](https://github.com/vitessio/vitess/pull/13356)
+### Enhancement
+#### Build/CI
+ * [release-16.0] Set the number of threads for release notes generation with a flag [#13316](https://github.com/vitessio/vitess/pull/13316)
+### Performance
+#### TabletManager
+ * [release-16.0] BaseShowTablesWithSizes: optimize MySQL 8.0 query (#13375) [#13389](https://github.com/vitessio/vitess/pull/13389)
+### Release
+#### Build/CI
+ * [release-16.0] Optimize release notes generation to use GitHub Milestones (#13398) [#13621](https://github.com/vitessio/vitess/pull/13621)
+#### Documentation
+ * [release-16.0] Fix format error in the `v16.0.2` release notes (#13057) [#13058](https://github.com/vitessio/vitess/pull/13058)
+### Testing
+#### Backup and Restore
+ * [release-16.0]: Fix `upgrade-downgrade` test setup and fix the `init_db.sql` [#13525](https://github.com/vitessio/vitess/pull/13525)
+#### Cluster management
+ * [release-16.0] Deflake `TestPlannedReparentShardPromoteReplicaFail` (#13548) [#13549](https://github.com/vitessio/vitess/pull/13549)
+ * [release-16.0] Flaky tests: Fix wrangler tests (#13568) [#13571](https://github.com/vitessio/vitess/pull/13571)
+#### General
+ * TestFix: `Upgrade Downgrade Testing - Backups - Manual` [#13408](https://github.com/vitessio/vitess/pull/13408)
+#### Query Serving
+ * [release-16.0] Fix benchmarks in `plan_test.go` (#13096) [#13126](https://github.com/vitessio/vitess/pull/13126)
+ * [release-16.0] Deflake `TestQueryTimeoutWithDual` test (#13405) [#13409](https://github.com/vitessio/vitess/pull/13409)
+ * [release-16.0] Fix `TestGatewayBufferingWhileReparenting` flakiness (#13469) [#13500](https://github.com/vitessio/vitess/pull/13500)
+ * [release-16.0] fix TestQueryTimeoutWithTables flaky test (#13579) [#13585](https://github.com/vitessio/vitess/pull/13585)
+#### VTorc
+ * [release-16.0]: Fix flakiness in VTOrc tests (#13489) [#13528](https://github.com/vitessio/vitess/pull/13528)
+#### vtctl
+ * Fix new vtctl upgrade downgrade test on `release-16.0` [#13252](https://github.com/vitessio/vitess/pull/13252)
+
diff --git a/changelog/16.0/16.0.3/release_notes.md b/changelog/16.0/16.0.3/release_notes.md
new file mode 100644
index 00000000000..d377bdc24f9
--- /dev/null
+++ b/changelog/16.0/16.0.3/release_notes.md
@@ -0,0 +1,7 @@
+# Release of Vitess v16.0.3
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/16.0/16.0.3/changelog.md).
+
+The release includes 38 merged Pull Requests.
+
+Thanks to all our contributors: @GuptaManan100, @app/github-actions, @app/vitess-bot, @frouioui, @harshit-gangal, @shlomi-noach, @systay
+
diff --git a/changelog/16.0/16.0.3/summary.md b/changelog/16.0/16.0.3/summary.md
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/changelog/16.0/16.0.4/changelog.md b/changelog/16.0/16.0.4/changelog.md
new file mode 100644
index 00000000000..45c4944aa25
--- /dev/null
+++ b/changelog/16.0/16.0.4/changelog.md
@@ -0,0 +1,24 @@
+# Changelog of Vitess v16.0.4
+
+### Bug fixes
+#### Backup and Restore
+ * Manual cherry-pick of 13339 [#13733](https://github.com/vitessio/vitess/pull/13733)
+ * [release-16.0] Address vttablet memory usage with backups to Azure Blob Service (#13770) [#13774](https://github.com/vitessio/vitess/pull/13774)
+#### Online DDL
+ * v16 backport: Fix closed channel panic in Online DDL cutover [#13732](https://github.com/vitessio/vitess/pull/13732)
+ * v16 backport: Solve RevertMigration.Comment read/write concurrency issue [#13736](https://github.com/vitessio/vitess/pull/13736)
+#### Query Serving
+ * planbuilder: Fix infinite recursion for subqueries [#13783](https://github.com/vitessio/vitess/pull/13783)
+ * [release-16.0] vtgate: fix race condition iterating tables and views from schema tracker (#13673) [#13795](https://github.com/vitessio/vitess/pull/13795)
+ * [16.0] bugfixes: collection of fixes to bugs found while fuzzing [#13805](https://github.com/vitessio/vitess/pull/13805)
+### CI/Build
+#### Online DDL
+ * [release-16.0] CI: fix onlineddl_scheduler flakiness (#13754) [#13759](https://github.com/vitessio/vitess/pull/13759)
+### Release
+#### General
+ * Back to dev mode after v16.0.3 [#13660](https://github.com/vitessio/vitess/pull/13660)
+ * Release 16.0 code freeze for `v16.0.4` release [#13810](https://github.com/vitessio/vitess/pull/13810)
+### Testing
+#### Build/CI
+ * [release-16.0] Flakes: Delete VTDATAROOT files in reparent test teardown within CI (#13793) [#13797](https://github.com/vitessio/vitess/pull/13797)
+
diff --git a/changelog/16.0/16.0.4/release_notes.md b/changelog/16.0/16.0.4/release_notes.md
new file mode 100644
index 00000000000..d46559f5fec
--- /dev/null
+++ b/changelog/16.0/16.0.4/release_notes.md
@@ -0,0 +1,7 @@
+# Release of Vitess v16.0.4
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/16.0/16.0.4/changelog.md).
+
+The release includes 11 merged Pull Requests.
+
+Thanks to all our contributors: @GuptaManan100, @app/vitess-bot, @dbussink, @rohit-nayak-ps, @shlomi-noach, @systay
+
diff --git a/changelog/16.0/16.0.4/summary.md b/changelog/16.0/16.0.4/summary.md
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/changelog/16.0/README.md b/changelog/16.0/README.md
index 35e87974acb..75b3f3a0a1f 100644
--- a/changelog/16.0/README.md
+++ b/changelog/16.0/README.md
@@ -1,5 +1,13 @@
## v16.0
The dedicated team for this release can be found [here](team.md).
+* **[16.0.4](16.0.4)**
+ * [Changelog](16.0.4/changelog.md)
+ * [Release Notes](16.0.4/release_notes.md)
+
+* **[16.0.3](16.0.3)**
+ * [Changelog](16.0.3/changelog.md)
+ * [Release Notes](16.0.3/release_notes.md)
+
* **[16.0.2](16.0.2)**
* [Changelog](16.0.2/changelog.md)
* [Release Notes](16.0.2/release_notes.md)
diff --git a/changelog/18.0/18.0.0/changelog.md b/changelog/18.0/18.0.0/changelog.md
new file mode 100644
index 00000000000..acf45ad7b37
--- /dev/null
+++ b/changelog/18.0/18.0.0/changelog.md
@@ -0,0 +1,529 @@
+# Changelog of Vitess v18.0.0
+
+### Bug fixes
+#### Backup and Restore
+ * vtctldclient: Add missing new backup option [#13543](https://github.com/vitessio/vitess/pull/13543)
+ * Backup: safe compressor/decompressor closure [#13668](https://github.com/vitessio/vitess/pull/13668)
+ * Address vttablet memory usage with backups to Azure Blob Service [#13770](https://github.com/vitessio/vitess/pull/13770)
+ * Do not drain tablet in incremental backup [#13773](https://github.com/vitessio/vitess/pull/13773)
+ * go/cmd/vtbackup: wait for plugins to finish initializing [#14113](https://github.com/vitessio/vitess/pull/14113)
+#### Build/CI
+ * Remove `os.Exit` in release-notes generation [#13310](https://github.com/vitessio/vitess/pull/13310)
+ * CI: Fix make build related issues [#13583](https://github.com/vitessio/vitess/pull/13583)
+ * Enable failures in `tools/e2e_test_race.sh` and fix races [#13654](https://github.com/vitessio/vitess/pull/13654)
+ * Fix regular expression issue in Golang Upgrade and remove `release-14.0` from target branch [#13846](https://github.com/vitessio/vitess/pull/13846)
+ * Make `Static Code Checks Etc` fail if the `./changelog` folder is out-of-date [#14003](https://github.com/vitessio/vitess/pull/14003)
+#### CLI
+ * viperutil: Remove potential cross site reflecting issue [#13483](https://github.com/vitessio/vitess/pull/13483)
+ * [vtctldclient] flags need to be defined to be deprecated [#13681](https://github.com/vitessio/vitess/pull/13681)
+ * Fix missing deprecated flags in `vttablet` and `vtgate` [#13975](https://github.com/vitessio/vitess/pull/13975)
+ * [release-18.0] Fix anonymous paths in cobra code-gen (#14185) [#14238](https://github.com/vitessio/vitess/pull/14238)
+ * servenv: Remove double close() logic [#14457](https://github.com/vitessio/vitess/pull/14457)
+ * [release-18.0] servenv: Remove double close() logic (#14457) [#14459](https://github.com/vitessio/vitess/pull/14459)
+#### Cluster management
+ * Prevent resetting replication every time we set replication source [#13377](https://github.com/vitessio/vitess/pull/13377)
+ * Don't run any reparent commands if the host is empty [#13396](https://github.com/vitessio/vitess/pull/13396)
+ * [main] Upgrade-Downgrade Fix: Schema-initialization stuck on semi-sync ACKs while upgrading (#13411) [#13440](https://github.com/vitessio/vitess/pull/13440)
+ * fix: error.as method usage to send pointer to the reference type expected [#13496](https://github.com/vitessio/vitess/pull/13496)
+ * check keyspace snapshot time if none specified for backup restores [#13557](https://github.com/vitessio/vitess/pull/13557)
+ * Flaky tests: Fix race in memory topo [#13559](https://github.com/vitessio/vitess/pull/13559)
+ * ignore all error for views in engine reload [#13590](https://github.com/vitessio/vitess/pull/13590)
+ * Fix `BackupShard` to get its options from its own flags [#13813](https://github.com/vitessio/vitess/pull/13813)
+#### Docker
+ * Fix ubi8.arm64.mysql80 build package mirrorserver error [#13431](https://github.com/vitessio/vitess/pull/13431)
+ * Fix dependencies in docker build script [#13520](https://github.com/vitessio/vitess/pull/13520)
+ * Use NodeJS v18 in VTAdmin Dockerfile [#13751](https://github.com/vitessio/vitess/pull/13751)
+ * [release-18.0] [Docker] Fix VTadmin build (#14363) [#14378](https://github.com/vitessio/vitess/pull/14378)
+#### Evalengine
+ * Fix a number of encoding issues when evaluating expressions with the evalengine [#13509](https://github.com/vitessio/vitess/pull/13509)
+ * Fix type comparisons for Nullsafe* functions [#13605](https://github.com/vitessio/vitess/pull/13605)
+ * fastparse: Fix bug in overflow detection [#13702](https://github.com/vitessio/vitess/pull/13702)
+ * evalengine: Mark UUID() function as non-constant [#14051](https://github.com/vitessio/vitess/pull/14051)
+ * [release-18.0] evalengine: Misc bugs (#14351) [#14354](https://github.com/vitessio/vitess/pull/14354)
+#### Examples
+ * Use $hostname in vtadmin script as all other scripts do [#13231](https://github.com/vitessio/vitess/pull/13231)
+ * Local example 101: idempotent on existing clusters [#13373](https://github.com/vitessio/vitess/pull/13373)
+ * Examples: only terminate vtadmin if it was started [#13433](https://github.com/vitessio/vitess/pull/13433)
+ * `examples/compose`: fix `consul:latest` error w/`docker-compose up -d` [#13468](https://github.com/vitessio/vitess/pull/13468)
+#### General
+ * Flakes: Synchronize access to logErrStacks in vterrors [#13827](https://github.com/vitessio/vitess/pull/13827)
+ * [release-18.0] viper: register dynamic config with both disk and live (#14453) [#14455](https://github.com/vitessio/vitess/pull/14455)
+#### Online DDL
+ * Solve RevertMigration.Comment read/write concurrency issue [#13700](https://github.com/vitessio/vitess/pull/13700)
+ * Fix closed channel `panic` in Online DDL cutover [#13729](https://github.com/vitessio/vitess/pull/13729)
+ * OnlineDDL: fix nil 'completed_timestamp' for cancelled migrations [#13928](https://github.com/vitessio/vitess/pull/13928)
+ * Fix `ApplySchema --batch-size` with ` --allow-zero-in-date` [#13951](https://github.com/vitessio/vitess/pull/13951)
+ * TableGC: support DROP VIEW [#14020](https://github.com/vitessio/vitess/pull/14020)
+ * OnlineDDL: cleanup cancelled migration artifacts; support `--retain-artifacts=` DDL strategy flag [#14029](https://github.com/vitessio/vitess/pull/14029)
+ * bugfix: change column name and type to json [#14093](https://github.com/vitessio/vitess/pull/14093)
+ * [Release 18.0]: Online DDL: timeouts for all gRPC calls (#14182) [#14189](https://github.com/vitessio/vitess/pull/14189)
+#### Query Serving
+ * fix: GetField to use existing session for query [#13219](https://github.com/vitessio/vitess/pull/13219)
+ * VReplication Workflows: make sequence tables follow routing rules [#13238](https://github.com/vitessio/vitess/pull/13238)
+ * Adding random query generation for endtoend testing of the Gen4 planner [#13260](https://github.com/vitessio/vitess/pull/13260)
+ * Bug fix: SQL queries erroring with message `unknown aggregation random` [#13330](https://github.com/vitessio/vitess/pull/13330)
+ * bugfixes: collection of fixes to bugs found while fuzzing [#13332](https://github.com/vitessio/vitess/pull/13332)
+ * bug: don't always wrap aggregation in coalesce [#13348](https://github.com/vitessio/vitess/pull/13348)
+ * Random selection of keyspace based on available tablet [#13359](https://github.com/vitessio/vitess/pull/13359)
+ * Enable Tcp keep alive and provide keep alive period setting [#13434](https://github.com/vitessio/vitess/pull/13434)
+ * Improving random query generation for endtoend testing [#13460](https://github.com/vitessio/vitess/pull/13460)
+ * ignore ongoing backfill vindex from routing selection [#13505](https://github.com/vitessio/vitess/pull/13505)
+ * [release-17.0] ignore ongoing backfill vindex from routing selection [#13523](https://github.com/vitessio/vitess/pull/13523)
+ * Fix flaky vtgate test TestInconsistentStateDetectedBuffering [#13560](https://github.com/vitessio/vitess/pull/13560)
+ * Fix show character set [#13565](https://github.com/vitessio/vitess/pull/13565)
+ * vtgate: fix race condition iterating tables and views from schema tracker [#13673](https://github.com/vitessio/vitess/pull/13673)
+ * sqlparser: Track if original default value is a literal [#13730](https://github.com/vitessio/vitess/pull/13730)
+ * Fix for "text type with an unknown/unsupported collation cannot be hashed" error [#13852](https://github.com/vitessio/vitess/pull/13852)
+ * VTGate Buffering: Use a more accurate heuristic for determining if we're doing a reshard [#13856](https://github.com/vitessio/vitess/pull/13856)
+ * sqlparser: Tablespace option is case sensitive [#13884](https://github.com/vitessio/vitess/pull/13884)
+ * Rewrite `USING` to `ON` condition for joins [#13931](https://github.com/vitessio/vitess/pull/13931)
+ * handle large number of predicates without timing out [#13979](https://github.com/vitessio/vitess/pull/13979)
+ * Fix `NOT IN` expression used in the SET NULL for a child table on an update [#13988](https://github.com/vitessio/vitess/pull/13988)
+ * Fix the `SELECT` query we run on the child table to verify that update is allowed on a RESTRICT constraint [#13991](https://github.com/vitessio/vitess/pull/13991)
+ * fix data race in join engine primitive olap streaming mode execution [#14012](https://github.com/vitessio/vitess/pull/14012)
+ * fix: cost to include subshard opcode [#14023](https://github.com/vitessio/vitess/pull/14023)
+ * Add session flag for stream execute grpc api [#14046](https://github.com/vitessio/vitess/pull/14046)
+ * Fix cascading Delete failure while using Prepared statements [#14048](https://github.com/vitessio/vitess/pull/14048)
+ * Fix Fk verification and update queries to accommodate for bindVariables being NULL [#14061](https://github.com/vitessio/vitess/pull/14061)
+ * DDL execution to commit open transaction [#14110](https://github.com/vitessio/vitess/pull/14110)
+ * fix: analyze statement parsing and planning [#14268](https://github.com/vitessio/vitess/pull/14268)
+ * [release-18.0] fix: analyze statement parsing and planning (#14268) [#14275](https://github.com/vitessio/vitess/pull/14275)
+ * [release-18.0] schemadiff: fix missing `DROP CONSTRAINT` in duplicate/redundant constraints scenario. (#14387) [#14391](https://github.com/vitessio/vitess/pull/14391)
+ * [release-18.0] vtgate/engine: Fix race condition in join logic (#14435) [#14441](https://github.com/vitessio/vitess/pull/14441)
+#### Schema Tracker
+ * Vttablet schema tracking: Fix _vt.schema_version corruption [#13045](https://github.com/vitessio/vitess/pull/13045)
+ * Ignore error while reading table data in Schema.Engine reload [#13421](https://github.com/vitessio/vitess/pull/13421)
+ * schema.Reload(): ignore column reading errors for views only, error for tables [#13442](https://github.com/vitessio/vitess/pull/13442)
+#### TabletManager
+ * mysqlctl: Correctly encode database and table names [#13312](https://github.com/vitessio/vitess/pull/13312)
+ * Fix remote VersionString API [#13484](https://github.com/vitessio/vitess/pull/13484)
+ * mysqlctl: Remove noisy log line [#13599](https://github.com/vitessio/vitess/pull/13599)
+ * GetSchema: limit concurrent operations [#13617](https://github.com/vitessio/vitess/pull/13617)
+ * mysqlctl: Reduce logging for running commands [#13659](https://github.com/vitessio/vitess/pull/13659)
+#### Throttler
+ * Tablet throttler: only start watching SrvKeyspace once it's confirmed to exist [#13384](https://github.com/vitessio/vitess/pull/13384)
+ * Throttler: reintroduce deprecated flags so that deprecation actually works [#13597](https://github.com/vitessio/vitess/pull/13597)
+ * Silence 'CheckThrottler' gRPC calls [#13925](https://github.com/vitessio/vitess/pull/13925)
+ * Tablet throttler: empty list of probes on non-leader [#13926](https://github.com/vitessio/vitess/pull/13926)
+ * [release-18.0] Throttler: set timeouts on gRPC communication and on topo communication (#14165) [#14167](https://github.com/vitessio/vitess/pull/14167)
+ * Tablet throttler: fix race condition by removing goroutine call [#14179](https://github.com/vitessio/vitess/pull/14179)
+ * [release-18.0] Tablet throttler: fix race condition by removing goroutine call (#14179) [#14198](https://github.com/vitessio/vitess/pull/14198)
+#### VReplication
+ * VReplication: Fix VDiff2 DeleteByUUID Query [#13255](https://github.com/vitessio/vitess/pull/13255)
+ * Better handling of vreplication setState() failure [#13488](https://github.com/vitessio/vitess/pull/13488)
+ * VReplication: Ignore unrelated shards in partial MoveTables traffic state [#13515](https://github.com/vitessio/vitess/pull/13515)
+ * VReplication: Ensure ROW events are sent within a transaction [#13547](https://github.com/vitessio/vitess/pull/13547)
+ * VReplication: Make Source Tablet Selection More Robust [#13582](https://github.com/vitessio/vitess/pull/13582)
+ * vtgate tablet gateway buffering: don't shutdown if not initialized [#13695](https://github.com/vitessio/vitess/pull/13695)
+ * VReplication: Improve MoveTables Create Error Handling [#13737](https://github.com/vitessio/vitess/pull/13737)
+ * Minor --initialize-target-sequences followups [#13758](https://github.com/vitessio/vitess/pull/13758)
+ * Flakes: skip flaky check that ETA for a VReplication VDiff2 Progress command is in the future. [#13804](https://github.com/vitessio/vitess/pull/13804)
+ * Flakes: VReplication unit tests: reduce goroutine leakage [#13824](https://github.com/vitessio/vitess/pull/13824)
+ * Properly support ignore_nulls in CreateLookupVindex [#13913](https://github.com/vitessio/vitess/pull/13913)
+ * VReplication: Handle SQL NULL and JSON 'null' correctly for JSON columns [#13944](https://github.com/vitessio/vitess/pull/13944)
+ * copy over existing vreplication rows copied to local counter if resuming from another tablet [#13949](https://github.com/vitessio/vitess/pull/13949)
+ * VDiff: correct handling of default source and target cells [#13969](https://github.com/vitessio/vitess/pull/13969)
+ * MoveTables Cancel: drop denied tables on target when dropping source/target tables [#14008](https://github.com/vitessio/vitess/pull/14008)
+ * VReplication VPlayer: set foreign_key_checks on initialization [#14013](https://github.com/vitessio/vitess/pull/14013)
+ * json: Fix quoting JSON keys [#14066](https://github.com/vitessio/vitess/pull/14066)
+ * VDiff: properly split cell values in record when using TabletPicker [#14099](https://github.com/vitessio/vitess/pull/14099)
+ * VDiff: Cleanup the controller for a VDiff before deleting it [#14107](https://github.com/vitessio/vitess/pull/14107)
+ * [release-18.0] VReplication: error on vtctldclient commands w/o tablet types (#14294) [#14298](https://github.com/vitessio/vitess/pull/14298)
+ * [release-18.0] Vtctld SwitchReads: fix bug where writes were also being switched as part of switching reads when all traffic was switched using SwitchTraffic (#14360) [#14379](https://github.com/vitessio/vitess/pull/14379)
+ * [release-18.0] VDiff: wait for shard streams of one table diff to complete for before starting that of the next table (#14345) [#14382](https://github.com/vitessio/vitess/pull/14382)
+ * [release-18.0] VDiff tablet selection: pick non-serving tablets in Reshard workflows (#14413) [#14418](https://github.com/vitessio/vitess/pull/14418)
+ * VReplication: Handle multiple streams in UpdateVReplicationWorkflow RPC [#14447](https://github.com/vitessio/vitess/pull/14447)
+ * [release-18.0] VDiff: "show all" should only report vdiffs for the specified keyspace and workflow (#14442) [#14466](https://github.com/vitessio/vitess/pull/14466)
+ * [release-18.0] VReplication: Handle multiple streams in UpdateVReplicationWorkflow RPC (#14447) [#14468](https://github.com/vitessio/vitess/pull/14468)
+#### VTAdmin
+ * Unset the PREFIX environment variable when building VTAdmin [#13554](https://github.com/vitessio/vitess/pull/13554)
+#### VTCombo
+ * Fix vtcombo DBDDL plugin race condition [#13117](https://github.com/vitessio/vitess/pull/13117)
+#### VTorc
+ * Ensure to call `servenv.Init` when needed [#13638](https://github.com/vitessio/vitess/pull/13638)
+#### vtctl
+ * [release-18.0] VReplication: Add missing info to vtctldclient workflow SHOW output (#14225) [#14240](https://github.com/vitessio/vitess/pull/14240)
+### CI/Build
+#### Backup and Restore
+ * Refactor `backup_pitr` into two distinct CI tests: builtin vs Xtrabackup [#13395](https://github.com/vitessio/vitess/pull/13395)
+ * Fixing `backup_pitr` flaky tests via wait-for loop on topo reads [#13781](https://github.com/vitessio/vitess/pull/13781)
+ * [release-18.0] Incremental backup: fix race condition in reading 'mysqlbinlog' output (#14330) [#14335](https://github.com/vitessio/vitess/pull/14335)
+#### Build/CI
+ * Update a number of dependencies [#13031](https://github.com/vitessio/vitess/pull/13031)
+ * Cleanup unused Dockerfile entries [#13327](https://github.com/vitessio/vitess/pull/13327)
+ * flags: Remove hardcoded runner paths [#13482](https://github.com/vitessio/vitess/pull/13482)
+ * added no-commit-collection option to launchable record build command [#13490](https://github.com/vitessio/vitess/pull/13490)
+ * Replace deprecated `github.com/golang/mock` with `go.uber.org/mock` [#13512](https://github.com/vitessio/vitess/pull/13512)
+ * [viper WatchConfig] platform-specific write to ensure callback fires exactly once [#13627](https://github.com/vitessio/vitess/pull/13627)
+ * build: Allow passing in custom -ldflags [#13748](https://github.com/vitessio/vitess/pull/13748)
+ * Run auto golang upgrade only on vitessio/vitess [#13766](https://github.com/vitessio/vitess/pull/13766)
+ * collations: implement collation dumping as a docker image [#13879](https://github.com/vitessio/vitess/pull/13879)
+#### Docker
+ * docker/k8s: add bookworm builds [#13436](https://github.com/vitessio/vitess/pull/13436)
+ * Bump docker images to `bullseye` [#13664](https://github.com/vitessio/vitess/pull/13664)
+#### Documentation
+ * fix docgen for subcommands [#13518](https://github.com/vitessio/vitess/pull/13518)
+ * update docgen to embed commit ID in autogenerated doc frontmatter [#14056](https://github.com/vitessio/vitess/pull/14056)
+#### General
+ * go/mysql: switch to new API for x/exp/slices.SortFunc [#13644](https://github.com/vitessio/vitess/pull/13644)
+ * [main] Upgrade the Golang version to `go1.21.1` [#13933](https://github.com/vitessio/vitess/pull/13933)
+ * [release-18.0] Upgrade the Golang version to `go1.21.2` [#14195](https://github.com/vitessio/vitess/pull/14195)
+ * [release-18.0] Upgrade the Golang version to `go1.21.3` [#14230](https://github.com/vitessio/vitess/pull/14230)
+#### Online DDL
+ * CI: fix onlineddl_scheduler flakiness [#13754](https://github.com/vitessio/vitess/pull/13754)
+ * [release-18.0] OnlineDDL: reduce vrepl_stress workload in forks (#14302) [#14349](https://github.com/vitessio/vitess/pull/14349)
+#### Query Serving
+ * Endtoend: stress tests for VTGate FOREIGN KEY support [#13799](https://github.com/vitessio/vitess/pull/13799)
+ * ci: pool-related test flakyness [#14076](https://github.com/vitessio/vitess/pull/14076)
+#### Throttler
+ * Deprecating and removing tablet throttler CLI flags and tests [#13246](https://github.com/vitessio/vitess/pull/13246)
+ * Throttler: verify deprecated flags are still allowed [#13615](https://github.com/vitessio/vitess/pull/13615)
+#### VReplication
+ * Flakes: Remove CI endtoend test for VReplication Copy Phase Throttling [#13343](https://github.com/vitessio/vitess/pull/13343)
+ * Flakes: Improve reliability of vreplication_copy_parallel test [#13857](https://github.com/vitessio/vitess/pull/13857)
+#### VTAdmin
+ * Improve time taken to run the examples by optimizing `vtadmin` build [#13262](https://github.com/vitessio/vitess/pull/13262)
+#### VTorc
+ * [release-18.0] docker: add dedicated vtorc container (#14126) [#14148](https://github.com/vitessio/vitess/pull/14148)
+### Dependabot
+#### General
+ * Bump word-wrap from 1.2.3 to 1.2.4 in /web/vtadmin [#13569](https://github.com/vitessio/vitess/pull/13569)
+ * Bump tough-cookie from 4.1.2 to 4.1.3 in /web/vtadmin [#13767](https://github.com/vitessio/vitess/pull/13767)
+ * [release-18.0] Bump github.com/cyphar/filepath-securejoin from 0.2.3 to 0.2.4 (#14239) [#14253](https://github.com/vitessio/vitess/pull/14253)
+ * [release-18.0] Bump golang.org/x/net from 0.14.0 to 0.17.0 (#14260) [#14264](https://github.com/vitessio/vitess/pull/14264)
+#### Java
+ * java: update to latest dependencies for grpc and protobuf [#13996](https://github.com/vitessio/vitess/pull/13996)
+#### Observability
+ * Bump tough-cookie and @cypress/request in /vitess-mixin/e2e [#13768](https://github.com/vitessio/vitess/pull/13768)
+#### VTAdmin
+ * build(deps-dev): bump vite from 4.2.1 to 4.2.3 in /web/vtadmin [#13240](https://github.com/vitessio/vitess/pull/13240)
+ * Bump protobufjs from 7.2.3 to 7.2.5 in /web/vtadmin [#13833](https://github.com/vitessio/vitess/pull/13833)
+ * [release-18.0] Bump postcss from 8.4.21 to 8.4.31 in /web/vtadmin (#14173) [#14258](https://github.com/vitessio/vitess/pull/14258)
+ * [release-18.0] Bump @babel/traverse from 7.21.4 to 7.23.2 in /web/vtadmin (#14304) [#14308](https://github.com/vitessio/vitess/pull/14308)
+### Documentation
+#### CLI
+ * gentler warning message on config-not-found [#13215](https://github.com/vitessio/vitess/pull/13215)
+ * switch casing in onlineddl subcommand help text [#14091](https://github.com/vitessio/vitess/pull/14091)
+ * [release-18.0] Bypass cobra completion commands so they still function (#14217) [#14234](https://github.com/vitessio/vitess/pull/14234)
+#### Documentation
+ * Add security audit report [#13221](https://github.com/vitessio/vitess/pull/13221)
+ * update link for reparenting guide [#13350](https://github.com/vitessio/vitess/pull/13350)
+ * anonymize homedirs in generated docs [#14101](https://github.com/vitessio/vitess/pull/14101)
+ * Summary changes for foreign keys [#14112](https://github.com/vitessio/vitess/pull/14112)
+ * fix bad copy-paste in zkctld docgen [#14123](https://github.com/vitessio/vitess/pull/14123)
+ * [release-18.0] release notes: edit summary for consistency (#14319) [#14320](https://github.com/vitessio/vitess/pull/14320)
+#### General
+ * Improve release process documentation [#14000](https://github.com/vitessio/vitess/pull/14000)
+#### Governance
+ * governance doc clean up [#13337](https://github.com/vitessio/vitess/pull/13337)
+### Enhancement
+#### Backup and Restore
+ * go/vt/mysqlctl: instrument s3 upload time [#12500](https://github.com/vitessio/vitess/pull/12500)
+ * metrics: change vtbackup_duration_by_phase to binary-valued vtbackup_phase [#12973](https://github.com/vitessio/vitess/pull/12973)
+ * Incremental backup & recovery: restore-to-timestamp [#13270](https://github.com/vitessio/vitess/pull/13270)
+ * backup: Allow for upgrade safe backups [#13449](https://github.com/vitessio/vitess/pull/13449)
+ * Incremental backup: accept GTID position without 'MySQL56/' flavor prefix [#13474](https://github.com/vitessio/vitess/pull/13474)
+ * Backup & Restore: vtctldclient to support PITR flags [#13513](https://github.com/vitessio/vitess/pull/13513)
+ * BackupShard: support incremental backup [#13522](https://github.com/vitessio/vitess/pull/13522)
+ * Point in time recovery: fix cross-tablet GTID evaluation [#13555](https://github.com/vitessio/vitess/pull/13555)
+ * Backup/restore: provision and restore a tablet with point-in-time recovery flags [#13964](https://github.com/vitessio/vitess/pull/13964)
+ * go/cmd/vtbackup: report replication status metrics during catch-up phase [#13995](https://github.com/vitessio/vitess/pull/13995)
+#### Build/CI
+ * Set the number of threads for release notes generation with a flag [#13273](https://github.com/vitessio/vitess/pull/13273)
+ * Optimize `make build` in `test.go` and in CI [#13567](https://github.com/vitessio/vitess/pull/13567)
+ * Skip VTAdmin build in more places [#13588](https://github.com/vitessio/vitess/pull/13588)
+ * Skip VTAdmin build in Docker tests [#13836](https://github.com/vitessio/vitess/pull/13836)
+ * Migrates most workflows to 4 and 16 cores Large GitHub-Hosted-Runners [#13845](https://github.com/vitessio/vitess/pull/13845)
+ * Skip launchable if the Pull Request is marked as a Draft [#13886](https://github.com/vitessio/vitess/pull/13886)
+ * [release-18.0] Automatic approval of `vitess-bot` clean backports (#14352) [#14357](https://github.com/vitessio/vitess/pull/14357)
+#### CLI
+ * Vtctldclient MoveTables [#13015](https://github.com/vitessio/vitess/pull/13015)
+ * migrate vtorc to use cobra commands [#13917](https://github.com/vitessio/vitess/pull/13917)
+#### Cluster management
+ * increase length of reparent_journal columns [#13287](https://github.com/vitessio/vitess/pull/13287)
+ * Improvements to PRS [#13623](https://github.com/vitessio/vitess/pull/13623)
+ * Add 2 more durability policies that allow RDONLY tablets to send semi-sync ACKs [#13698](https://github.com/vitessio/vitess/pull/13698)
+ * `vtctld`/`vtorc`: improve reparenting stats [#13723](https://github.com/vitessio/vitess/pull/13723)
+#### Documentation
+ * consolidate docs [#13959](https://github.com/vitessio/vitess/pull/13959)
+#### Evalengine
+ * evalengine: implement date/time math [#13274](https://github.com/vitessio/vitess/pull/13274)
+ * sqlparser: Add support for TIMESTAMPADD [#13314](https://github.com/vitessio/vitess/pull/13314)
+ * mysql: introduce icuregex package [#13391](https://github.com/vitessio/vitess/pull/13391)
+ * icuregex: Lazy load ICU data into memory [#13640](https://github.com/vitessio/vitess/pull/13640)
+ * evalengine: Improve weight string support [#13658](https://github.com/vitessio/vitess/pull/13658)
+ * evalengine: Fix JSON weight string computation [#13669](https://github.com/vitessio/vitess/pull/13669)
+#### Examples
+ * Misc Local Install improvements. [#13446](https://github.com/vitessio/vitess/pull/13446)
+#### General
+ * Refactor code to remove `evalengine` as a dependency of `VTOrc` [#13642](https://github.com/vitessio/vitess/pull/13642)
+#### Observability
+ * vtorc: add detected_problems counter [#13967](https://github.com/vitessio/vitess/pull/13967)
+#### Online DDL
+ * `vtctl OnlineDDL`: complete command set [#12963](https://github.com/vitessio/vitess/pull/12963)
 * Online DDL: improved row estimation via ANALYZE TABLE with --analyze-table strategy flag [#13352](https://github.com/vitessio/vitess/pull/13352)
+ * OnlineDDL: support @@migration_context in vtgate session. Use if non-empty [#13675](https://github.com/vitessio/vitess/pull/13675)
+ * Vtgate: pass 'SHOW VITESS_MIGRATIONS' to tablet's query executor [#13726](https://github.com/vitessio/vitess/pull/13726)
+ * vtctldclient OnlineDDL CANCEL [#13860](https://github.com/vitessio/vitess/pull/13860)
+ * vtctldclient: support OnlineDDL `complete`, `launch` commands [#13896](https://github.com/vitessio/vitess/pull/13896)
+ * [release-18.0] Online DDL: lint DDL strategy flags (#14373) [#14399](https://github.com/vitessio/vitess/pull/14399)
+#### Query Serving
+ * vindexes: return unknown params [#12951](https://github.com/vitessio/vitess/pull/12951)
+ * Fix and Make aggregation planner handle aggregation functions better [#13228](https://github.com/vitessio/vitess/pull/13228)
+ * vtgate planner: HAVING in the new operator horizon planner [#13289](https://github.com/vitessio/vitess/pull/13289)
+ * Support complex aggregation in Gen4's Operators [#13326](https://github.com/vitessio/vitess/pull/13326)
+ * Adds support for ANY_VALUE [#13342](https://github.com/vitessio/vitess/pull/13342)
+ * Aggregation engine refactor [#13378](https://github.com/vitessio/vitess/pull/13378)
+ * Move more horizon planning to the operators [#13412](https://github.com/vitessio/vitess/pull/13412)
+ * Move UNION planning to the operators [#13450](https://github.com/vitessio/vitess/pull/13450)
+ * Improve and Fix Distinct Aggregation planner [#13466](https://github.com/vitessio/vitess/pull/13466)
+ * Enhancing VTGate buffering for MoveTables and Shard by Shard Migration [#13507](https://github.com/vitessio/vitess/pull/13507)
+ * Add 2 new metrics with tablet type labels [#13521](https://github.com/vitessio/vitess/pull/13521)
+ * vtgate table schema tracking to use GetSchema rpc [#13544](https://github.com/vitessio/vitess/pull/13544)
+ * Add a `keyspace` configuration in the `vschema` for foreign key mode [#13553](https://github.com/vitessio/vitess/pull/13553)
+ * Reduce usages of old horizon planning fallback [#13595](https://github.com/vitessio/vitess/pull/13595)
+ * Add dry-run/monitoring-only mode for TxThrottler [#13604](https://github.com/vitessio/vitess/pull/13604)
+ * go/vt/vitessdriver: implement driver.{Connector,DriverContext} [#13704](https://github.com/vitessio/vitess/pull/13704)
+ * More union merging [#13743](https://github.com/vitessio/vitess/pull/13743)
+ * Move subqueries to use the operator model [#13750](https://github.com/vitessio/vitess/pull/13750)
+ * Add support for tuple as value type [#13800](https://github.com/vitessio/vitess/pull/13800)
+ * icuregex: Update to ICU 73 [#13912](https://github.com/vitessio/vitess/pull/13912)
+ * Change internal vindex type recommendation for integrals to xxhash [#13956](https://github.com/vitessio/vitess/pull/13956)
+ * Foreign key cascade: retain "for update" lock on select query plans [#13985](https://github.com/vitessio/vitess/pull/13985)
+ * Improve the rewriter to simplify more queries [#14059](https://github.com/vitessio/vitess/pull/14059)
+ * [release-18.0] gen4: Support explicit column aliases on derived tables (#14129) [#14156](https://github.com/vitessio/vitess/pull/14156)
+#### Schema Tracker
+ * vttablet: do not notify `vtgate` about internal tables [#13897](https://github.com/vitessio/vitess/pull/13897)
+#### TabletManager
+ * Tablet throttler: throttled app configuration via `vtctl UpdateThrottlerConfig` [#13351](https://github.com/vitessio/vitess/pull/13351)
+#### Throttler
+ * txthrottler: verify config at vttablet startup, consolidate funcs [#13115](https://github.com/vitessio/vitess/pull/13115)
+ * txthrottler: add metrics for topoWatcher and healthCheckStreamer [#13153](https://github.com/vitessio/vitess/pull/13153)
+ * `UpdateThrottlerConfig --unthrottle-app ...` [#13494](https://github.com/vitessio/vitess/pull/13494)
+ * Reroute 'ALTER VITESS_MIGRATION ... THROTTLE ...' through topo [#13511](https://github.com/vitessio/vitess/pull/13511)
+ * Tablet throttler: inter-checks via gRPC [#13514](https://github.com/vitessio/vitess/pull/13514)
+ * Per workload TxThrottler metrics [#13526](https://github.com/vitessio/vitess/pull/13526)
+ * Throttler: exempt apps via `UpdateThrottlerConfig --throttle-app-exempt` [#13666](https://github.com/vitessio/vitess/pull/13666)
+#### Topology
+ * Support arbitrary ZooKeeper config lines [#13829](https://github.com/vitessio/vitess/pull/13829)
+#### VReplication
+ * MoveTables: allow copying all tables in a single atomic copy phase cycle [#13137](https://github.com/vitessio/vitess/pull/13137)
+ * VReplication: More intelligently manage vschema table entries on unsharded targets [#13220](https://github.com/vitessio/vitess/pull/13220)
+ * MoveTables sequence e2e tests: change terminology to use basic vs simple everywhere for partial movetables workflows [#13435](https://github.com/vitessio/vitess/pull/13435)
+ * wrangler,workflow/workflow: materialize from intersecting source shards based on primary vindexes [#13782](https://github.com/vitessio/vitess/pull/13782)
+ * Implement Reshard in vtctldclient [#13792](https://github.com/vitessio/vitess/pull/13792)
+ * VDiff: Migrate client command to vtctldclient [#13976](https://github.com/vitessio/vitess/pull/13976)
+ * Migrate vreplication commands to vtctldclient: Mount and Migrate [#14174](https://github.com/vitessio/vitess/pull/14174)
+ * [release-18.0] Migrate CreateLookupVindex and ExternalizeVindex to vtctldclient (#14086) [#14183](https://github.com/vitessio/vitess/pull/14183)
+ * Migrate Materialize command to vtctldclient [#14184](https://github.com/vitessio/vitess/pull/14184)
 * [Release 18.0] Backport of #14174 [#14210](https://github.com/vitessio/vitess/pull/14210)
+ * [release-18.0] Migrate Materialize command to vtctldclient (#14184) [#14214](https://github.com/vitessio/vitess/pull/14214)
+ * [release-18.0] VReplication: Add traffic state to vtctldclient workflow status output (#14280) [#14282](https://github.com/vitessio/vitess/pull/14282)
+ * [release-18.0] VReplication: Add --all-cells flag to create sub-commands (#14341) [#14343](https://github.com/vitessio/vitess/pull/14343)
+#### VTAdmin
+ * [release-18.0] Optimize the GetWorkflows RPC (#14212) [#14233](https://github.com/vitessio/vitess/pull/14233)
+#### VTCombo
+ * `vttestserver`: persist vschema changes in `--persistent_mode` [#13065](https://github.com/vitessio/vitess/pull/13065)
+#### VTorc
+ * Improve VTOrc failure detection to be able to better handle dead primary failures [#13190](https://github.com/vitessio/vitess/pull/13190)
+ * Add flag to VTOrc to enable/disable its ability to run ERS [#13259](https://github.com/vitessio/vitess/pull/13259)
+ * Add metric for showing the errant GTIDs in VTOrc [#13281](https://github.com/vitessio/vitess/pull/13281)
+ * Add timestamp to vtorc debug page [#13379](https://github.com/vitessio/vitess/pull/13379)
+ * Augment VTOrc to also store the shard records and use it to better judge Primary recoveries [#13587](https://github.com/vitessio/vitess/pull/13587)
+ * Fix a couple of logs in VTOrc [#13667](https://github.com/vitessio/vitess/pull/13667)
+ * Errant GTID Metrics Refactor [#13670](https://github.com/vitessio/vitess/pull/13670)
+ * VTOrc converts a tablet to DRAINED type if it detects errant GTIDs on it [#13873](https://github.com/vitessio/vitess/pull/13873)
+#### vtctl
+ * vtctl,vindexes: logs warnings and export stat for unknown vindex params [#13322](https://github.com/vitessio/vitess/pull/13322)
+ * vtctldclient OnlineDDL: support `throttle`, `unthrottle` [#13916](https://github.com/vitessio/vitess/pull/13916)
+#### web UI
+ * Add vtsql flags to vtadmin [#13674](https://github.com/vitessio/vitess/pull/13674)
+### Feature Request
+#### CLI
+ * [vtctld] more cobra binaries [#13930](https://github.com/vitessio/vitess/pull/13930)
+ * [cobra] vtgate and vttablet [#13943](https://github.com/vitessio/vitess/pull/13943)
+ * [cli] migrate mysqlctl and mysqlctld to cobra [#13946](https://github.com/vitessio/vitess/pull/13946)
+ * [CLI] cobra lots of things [#14007](https://github.com/vitessio/vitess/pull/14007)
+ * miscellaneous cobras [#14069](https://github.com/vitessio/vitess/pull/14069)
+ * [cli] cobra zookeeper [#14094](https://github.com/vitessio/vitess/pull/14094)
+#### Online DDL
+ * Add OnlineDDL show support [#13738](https://github.com/vitessio/vitess/pull/13738)
+ * [onlineddl] retry and cleanup [#13830](https://github.com/vitessio/vitess/pull/13830)
+#### Query Serving
+ * Add group_concat aggregation support [#13331](https://github.com/vitessio/vitess/pull/13331)
+ * Add support for kill statement [#13371](https://github.com/vitessio/vitess/pull/13371)
+ * Build foreign key definition in schema tracker [#13657](https://github.com/vitessio/vitess/pull/13657)
+ * Foreign Keys: `INSERT` planning [#13676](https://github.com/vitessio/vitess/pull/13676)
+ * Foreign Keys: `DELETE` planning [#13746](https://github.com/vitessio/vitess/pull/13746)
+ * Foreign Keys: `UPDATE` planning [#13762](https://github.com/vitessio/vitess/pull/13762)
+ * Add Foreign key Cascade engine primitive [#13802](https://github.com/vitessio/vitess/pull/13802)
+ * Foreign key cascade planning for DELETE and UPDATE queries [#13823](https://github.com/vitessio/vitess/pull/13823)
+ * Add Foreign key verify constraint engine primitive [#13848](https://github.com/vitessio/vitess/pull/13848)
+ * Add VSchema DDL support for dropping sequence and auto increment [#13882](https://github.com/vitessio/vitess/pull/13882)
+ * Update Cascade Planning leading to Foreign key constraint verification [#13902](https://github.com/vitessio/vitess/pull/13902)
+ * Disallow Insert with Duplicate key update and Replace Into queries on foreign key column, set locks on fk queries [#13953](https://github.com/vitessio/vitess/pull/13953)
+#### VReplication
+ * VReplication: Initialize Sequence Tables Used By Tables Being Moved [#13656](https://github.com/vitessio/vitess/pull/13656)
+ * MoveTables: add flag to specify that routing rules should not be created when a movetables workflow is created [#13895](https://github.com/vitessio/vitess/pull/13895)
+### Internal Cleanup
+#### Build/CI
+ * docker/k8s: Cleanup done TODO [#13347](https://github.com/vitessio/vitess/pull/13347)
+ * Remove unused chromedriver [#13573](https://github.com/vitessio/vitess/pull/13573)
+ * docker/bootstrap: remove --no-cache flag [#13785](https://github.com/vitessio/vitess/pull/13785)
+#### CLI
+ * remove query_analyzer binary and release [#14055](https://github.com/vitessio/vitess/pull/14055)
+ * [release-18.0] Make vtctldclient mount command more standard (#14281) [#14283](https://github.com/vitessio/vitess/pull/14283)
+#### Cluster management
+ * Fix logging by omitting the host and port in `SetReadOnly` [#13470](https://github.com/vitessio/vitess/pull/13470)
+ * Improve logging and renaming PrimaryTermStartTimestamp in vttablets [#13625](https://github.com/vitessio/vitess/pull/13625)
+#### Evalengine
+ * collations: Refactor to separate basic collation information from data [#13868](https://github.com/vitessio/vitess/pull/13868)
+#### Examples
+ * docker/mini: remove refs to orc configs [#13495](https://github.com/vitessio/vitess/pull/13495)
+#### General
+ * servenv: Allow for explicit bind address [#13188](https://github.com/vitessio/vitess/pull/13188)
+ * Remove `out.txt` and add `release-17.0` to go upgrade automation [#13261](https://github.com/vitessio/vitess/pull/13261)
+ * Deprecate VTGR [#13301](https://github.com/vitessio/vitess/pull/13301)
+ * mysql: Refactor dependencies [#13688](https://github.com/vitessio/vitess/pull/13688)
+ * Remove explicit usage of etcd v2 (api and storage) [#13791](https://github.com/vitessio/vitess/pull/13791)
+ * Go 1.21 cleanups [#13862](https://github.com/vitessio/vitess/pull/13862)
+ * [wrangler] cleanup unused functions [#13867](https://github.com/vitessio/vitess/pull/13867)
+ * [misc] Delete more unused functions, tidy up dupe imports [#13878](https://github.com/vitessio/vitess/pull/13878)
+ * Clean up deprecated slice header usage and unused code [#13880](https://github.com/vitessio/vitess/pull/13880)
+ * [misc] tidy imports [#13885](https://github.com/vitessio/vitess/pull/13885)
+ * [staticcheck] miscellaneous tidying [#13892](https://github.com/vitessio/vitess/pull/13892)
+ * [staticcheck] Cleanup deprecations [#13898](https://github.com/vitessio/vitess/pull/13898)
+ * Consolidate helper functions for working with proto3 time messages [#13905](https://github.com/vitessio/vitess/pull/13905)
+ * [staticcheck] Last few staticchecks! [#13909](https://github.com/vitessio/vitess/pull/13909)
+ * Remove deprecated flags before `v18.0.0` [#14071](https://github.com/vitessio/vitess/pull/14071)
+#### Observability
+ * stats: use *time.Ticker instead of time.After() [#13492](https://github.com/vitessio/vitess/pull/13492)
+#### Query Serving
+ * Operator planner refactor [#13294](https://github.com/vitessio/vitess/pull/13294)
+ * Refactor and add a comment to schema initialisation code [#13309](https://github.com/vitessio/vitess/pull/13309)
+ * vtgate v3 planner removal [#13458](https://github.com/vitessio/vitess/pull/13458)
+ * vtgate buffering logic: remove the deprecated healthcheck based implementation [#13584](https://github.com/vitessio/vitess/pull/13584)
+ * Refactor Expression and Statement Simplifier [#13636](https://github.com/vitessio/vitess/pull/13636)
+ * Remove duplicate ACL check in tabletserver handleHTTPConsolidations [#13876](https://github.com/vitessio/vitess/pull/13876)
+ * inputs method to return additional information about the input primitive [#13883](https://github.com/vitessio/vitess/pull/13883)
+ * refactor: move DML logic to sql_builder.go [#13920](https://github.com/vitessio/vitess/pull/13920)
+ * Fix `TestLeftJoinUsingUnsharded` and remove instability when running E2E locally [#13973](https://github.com/vitessio/vitess/pull/13973)
+ * Remove excessive logging in transactions [#14021](https://github.com/vitessio/vitess/pull/14021)
+ * moved timeout test to different package [#14028](https://github.com/vitessio/vitess/pull/14028)
+ * [release-18.0] Rename Foreign Key enum values in VSchema and drop `FK_` prefix (#14274) [#14299](https://github.com/vitessio/vitess/pull/14299)
+ * tx_throttler: remove topo watchers metric [#14444](https://github.com/vitessio/vitess/pull/14444)
+#### TabletManager
+ * mysqlctl: Use DBA connection for schema operations [#13178](https://github.com/vitessio/vitess/pull/13178)
+ * k8stopo: Include deprecation warning [#13299](https://github.com/vitessio/vitess/pull/13299)
+ * k8stopo: Remove the deprecated Kubernetes topo [#13303](https://github.com/vitessio/vitess/pull/13303)
+ * vtgr: Remove deprecated vtgr [#13308](https://github.com/vitessio/vitess/pull/13308)
+ * mysqlctl: Move more to use built in MySQL client [#13338](https://github.com/vitessio/vitess/pull/13338)
+#### Throttler
+ * `txthrottler`: remove `txThrottlerConfig` struct, rely on `tabletenv` [#13624](https://github.com/vitessio/vitess/pull/13624)
+#### VReplication
+ * Use sqlparser for all dynamic query building in VDiff2 [#13319](https://github.com/vitessio/vitess/pull/13319)
+ * vreplication: Move to use collations package [#13566](https://github.com/vitessio/vitess/pull/13566)
+#### VTAdmin
+ * [VTAdmin] Upgrade to use node 18.16.0 [#13288](https://github.com/vitessio/vitess/pull/13288)
+#### VTorc
+ * VTOrc: Update the primary key for all the tables from `hostname, port` to `alias` [#13243](https://github.com/vitessio/vitess/pull/13243)
+ * vtorc: Cleanup more unused code [#13354](https://github.com/vitessio/vitess/pull/13354)
+ * Improve lock action string [#13355](https://github.com/vitessio/vitess/pull/13355)
+ * Improve VTOrc logging statements, now that we have alias as a field [#13428](https://github.com/vitessio/vitess/pull/13428)
+ * Remove excessive logging in VTOrc APIs [#13459](https://github.com/vitessio/vitess/pull/13459)
+ * [release-16.0] Remove excessive logging in VTOrc APIs (#13459) [#13462](https://github.com/vitessio/vitess/pull/13462)
+#### vtctl
+ * [release-18.0] Move all examples to vtctldclient (#14226) [#14241](https://github.com/vitessio/vitess/pull/14241)
+#### vtexplain
+ * vtexplain: Fix passing through context for cleanup [#13900](https://github.com/vitessio/vitess/pull/13900)
+### Performance
+#### General
+ * proto: Faster clone [#13914](https://github.com/vitessio/vitess/pull/13914)
+#### Query Serving
+ * Cache info schema table info [#13724](https://github.com/vitessio/vitess/pull/13724)
+ * gen4: Fast aggregations [#13904](https://github.com/vitessio/vitess/pull/13904)
+ * Cache v3 [#13939](https://github.com/vitessio/vitess/pull/13939)
+ * Reduce network pressure on multi row insert [#14064](https://github.com/vitessio/vitess/pull/14064)
+ * VTGate FK stress tests suite: improvements [#14098](https://github.com/vitessio/vitess/pull/14098)
+#### TabletManager
+ * BaseShowTablesWithSizes: optimize MySQL 8.0 query [#13375](https://github.com/vitessio/vitess/pull/13375)
+ * Support views in BaseShowTablesWithSizes for MySQL 8.0 [#13394](https://github.com/vitessio/vitess/pull/13394)
+#### vtctl
+ * `ApplySchema`: support `--batch-size` flag in 'direct' strategy [#13693](https://github.com/vitessio/vitess/pull/13693)
+### Regression
+#### Backup and Restore
+ * Fix backup on s3 like storage [#14311](https://github.com/vitessio/vitess/pull/14311)
+ * [release-18.0] Fix backup on s3 like storage (#14311) [#14362](https://github.com/vitessio/vitess/pull/14362)
+#### Query Serving
+ * fix: ShardedRouting clone to clone slice of reference correctly [#13265](https://github.com/vitessio/vitess/pull/13265)
+ * Handle inconsistent state error in query buffering [#13333](https://github.com/vitessio/vitess/pull/13333)
+ * fix: insert with negative value [#14244](https://github.com/vitessio/vitess/pull/14244)
+ * [release-18.0] fix: insert with negative value (#14244) [#14247](https://github.com/vitessio/vitess/pull/14247)
+ * [release-18.0] use aggregation engine over distinct engine when overlapping order by (#14359) [#14361](https://github.com/vitessio/vitess/pull/14361)
+ * [release-18.0] Performance Fixes for Vitess 18 (#14383) [#14393](https://github.com/vitessio/vitess/pull/14393)
+ * [release-18.0] tuple: serialized form (#14392) [#14394](https://github.com/vitessio/vitess/pull/14394)
+### Release
+#### Build/CI
+ * Fix incorrect output in release scripts [#13385](https://github.com/vitessio/vitess/pull/13385)
+ * Optimize release notes generation to use GitHub Milestones [#13398](https://github.com/vitessio/vitess/pull/13398)
+#### CLI
+ * Add vtctldclient info to the 18.0 summary [#14259](https://github.com/vitessio/vitess/pull/14259)
+#### Documentation
+ * Add end-of-life documentation + re-organize internal documentation [#13401](https://github.com/vitessio/vitess/pull/13401)
+ * Update known issues in `v16.x` and `v17.0.0` [#13618](https://github.com/vitessio/vitess/pull/13618)
+#### General
+ * Copy v17.0.0-rc changelog to main [#13248](https://github.com/vitessio/vitess/pull/13248)
+ * Update release notes for 17.0.0-rc2 [#13306](https://github.com/vitessio/vitess/pull/13306)
+ * Forward port of release notes changes from v17.0.0 GA [#13370](https://github.com/vitessio/vitess/pull/13370)
+ * Add v15.0.4, v16.0.3, and v17.0.1 changelogs [#13661](https://github.com/vitessio/vitess/pull/13661)
+ * Copy release notes for v17.0.2 and v16.0.4 [#13811](https://github.com/vitessio/vitess/pull/13811)
+ * Code freeze of release-18.0 [#14131](https://github.com/vitessio/vitess/pull/14131)
+ * Release of v18.0.0-rc1 [#14136](https://github.com/vitessio/vitess/pull/14136)
+ * Back to dev mode after `v18.0.0-rc1` release [#14169](https://github.com/vitessio/vitess/pull/14169)
+ * Code freeze of release-18.0 [#14405](https://github.com/vitessio/vitess/pull/14405)
+### Testing
+#### Build/CI
+ * Flakes: Address TestMigrate Failures [#12866](https://github.com/vitessio/vitess/pull/12866)
+ * [vipersync] skip flaky test [#13501](https://github.com/vitessio/vitess/pull/13501)
+ * [vipersync] deflake TestWatchConfig [#13545](https://github.com/vitessio/vitess/pull/13545)
+ * Fix bug in `fileNameFromPosition` test helper [#13778](https://github.com/vitessio/vitess/pull/13778)
+ * Flakes: Delete VTDATAROOT files in reparent test teardown within CI [#13793](https://github.com/vitessio/vitess/pull/13793)
+ * CI: Misc test improvements to limit failures with various runners [#13825](https://github.com/vitessio/vitess/pull/13825)
+ * actually test vtcombo [#14095](https://github.com/vitessio/vitess/pull/14095)
+ * Remove FOSSA Test from CI until we can do it in a secure way [#14119](https://github.com/vitessio/vitess/pull/14119)
+#### Cluster management
+ * Fix `Fakemysqldaemon` to store the host and port after `SetReplicationSource` call [#13439](https://github.com/vitessio/vitess/pull/13439)
+ * Deflake `TestPlannedReparentShardPromoteReplicaFail` [#13548](https://github.com/vitessio/vitess/pull/13548)
+ * Flaky tests: Fix wrangler tests [#13568](https://github.com/vitessio/vitess/pull/13568)
+#### General
+ * [CI] deflake viper sync tests [#13185](https://github.com/vitessio/vitess/pull/13185)
+ * Remove `--disable_active_reparents` flag in vttablet-up.sh [#13504](https://github.com/vitessio/vitess/pull/13504)
+ * Add leak checking for vtgate tests [#13835](https://github.com/vitessio/vitess/pull/13835)
+#### Online DDL
+ * Fix potential panics due to "Fail in goroutine after test completed" [#13596](https://github.com/vitessio/vitess/pull/13596)
+ * [OnlineDDL] add label so break works as intended [#13691](https://github.com/vitessio/vitess/pull/13691)
+#### Query Serving
+ * Deflake `TestQueryTimeoutWithDual` test [#13405](https://github.com/vitessio/vitess/pull/13405)
+ * Fix `TestGatewayBufferingWhileReparenting` flakiness [#13469](https://github.com/vitessio/vitess/pull/13469)
+ * fix TestQueryTimeoutWithTables flaky test [#13579](https://github.com/vitessio/vitess/pull/13579)
+ * schemadiff: add time measure test for massive schema load and diff [#13697](https://github.com/vitessio/vitess/pull/13697)
+ * End to end testing suite for foreign keys [#13870](https://github.com/vitessio/vitess/pull/13870)
+ * Fix setup order to avoid races [#13871](https://github.com/vitessio/vitess/pull/13871)
+ * Use correct syntax in test [#13907](https://github.com/vitessio/vitess/pull/13907)
+ * test: added test to check binlogs to contain the cascade events [#13970](https://github.com/vitessio/vitess/pull/13970)
+ * E2E Fuzzing testing for foreign keys [#13980](https://github.com/vitessio/vitess/pull/13980)
+ * Fix foreign key plan tests expectation [#13997](https://github.com/vitessio/vitess/pull/13997)
+ * [release-18.0] vtgate: Allow more errors for the warning check (#14421) [#14423](https://github.com/vitessio/vitess/pull/14423)
+#### VReplication
+ * Flakes: remove non-determinism from vtctldclient MoveTables unit test [#13765](https://github.com/vitessio/vitess/pull/13765)
+ * Flakes: empty vtdataroot before starting a new vreplication e2e test [#13803](https://github.com/vitessio/vitess/pull/13803)
+ * Flakes: Add recently added 'select rows_copied' query to ignore list [#13993](https://github.com/vitessio/vitess/pull/13993)
+ * [release-18.0] TestStreamMigrateMainflow: fix panic in test [#14425](https://github.com/vitessio/vitess/pull/14425)
+#### VTorc
+ * Fix flakiness in `TestDeadPrimaryRecoversImmediately` [#13232](https://github.com/vitessio/vitess/pull/13232)
+ * Fix flakiness in VTOrc tests [#13489](https://github.com/vitessio/vitess/pull/13489)
+ * Skip flaky test `TestReadOutdatedInstanceKeys` [#13561](https://github.com/vitessio/vitess/pull/13561)
+ * Reintroduce `TestReadOutdatedInstanceKeys` with debugging information [#13562](https://github.com/vitessio/vitess/pull/13562)
+#### vtctl
+ * Fix merge conflict with new tests [#13869](https://github.com/vitessio/vitess/pull/13869)
+
diff --git a/changelog/18.0/18.0.0/release_notes.md b/changelog/18.0/18.0.0/release_notes.md
new file mode 100644
index 00000000000..9851245a648
--- /dev/null
+++ b/changelog/18.0/18.0.0/release_notes.md
@@ -0,0 +1,326 @@
+# Release of Vitess v18.0.0
+## Summary
+
+### Table of Contents
+
+- **[Major Changes](#major-changes)**
+ - **[Breaking Changes](#breaking-changes)**
+ - [Local examples now use etcd v3 storage and API](#local-examples-etcd-v3)
+ - **[New command line flags and behavior](#new-flag)**
+ - [VTOrc flag `--allow-emergency-reparent`](#new-flag-toggle-ers)
+ - [VTOrc flag `--change-tablets-with-errant-gtid-to-drained`](#new-flag-errant-gtid-convert)
+ - [ERS sub flag `--wait-for-all-tablets`](#new-ers-subflag)
+ - [VTGate flag `--grpc-send-session-in-streaming`](#new-vtgate-streaming-sesion)
+ - **[Experimental Foreign Key Support](#foreign-keys)**
+ - **[VTAdmin](#vtadmin)**
+ - [Updated to node v18.16.0](#update-node)
+ - **[Deprecations and Deletions](#deprecations-and-deletions)**
+ - [Legacy Client Binaries](#legacy-client-binaries)
+ - [Deprecated Flags](#deprecated-flags)
+ - [Deprecated Stats](#deprecated-stats)
+ - [Deleted Flags](#deleted-flags)
+ - [Deleted `V3` planner](#deleted-v3)
+ - [Deleted `k8stopo`](#deleted-k8stopo)
+ - [Deleted `vtgr`](#deleted-vtgr)
+ - [Deleted `query_analyzer`](#deleted-query_analyzer)
+ - **[New Stats](#new-stats)**
+ - [VTGate Vindex unknown parameters](#vtgate-vindex-unknown-parameters)
+ - [VTBackup stat `Phase`](#vtbackup-stat-phase)
+ - [VTBackup stat `PhaseStatus`](#vtbackup-stat-phase-status)
+ - [Backup and restore metrics for AWS S3](#backup-restore-metrics-aws-s3)
+ - [VTCtld and VTOrc reparenting stats](#vtctld-and-vtorc-reparenting-stats)
+ - **[VTTablet](#vttablet)**
+ - [VTTablet: New ResetSequences RPC](#vttablet-new-rpc-reset-sequences)
+ - **[Docker](#docker)**
+ - [Debian: Bookworm added and made default](#debian-bookworm)
+ - [Debian: Buster removed](#debian-buster)
+ - **[Durability Policies](#durability-policies)**
+ - [New Durability Policies](#new-durability-policies)
+
+## Major Changes
+
+### Breaking Changes
+
+#### Local examples now use etcd v3 storage and API
+In previous releases the [local examples](https://github.com/vitessio/vitess/tree/main/examples/local) were
+explicitly using etcd v2 storage (`etcd --enable-v2=true`) and API (`ETCDCTL_API=2`) mode. We have now
+removed this legacy etcd usage and instead use the new (default) etcd v3 storage and API. Please see
+[PR #13791](https://github.com/vitessio/vitess/pull/13791) for details. If you are using the local
+examples in any sort of long-term non-testing capacity, then you will need to explicitly use the v2 storage
+and API mode or [migrate your existing data from v2 to v3](https://etcd.io/docs/v3.5/tutorials/how-to-migrate/).
+
+### New command line flags and behavior
+
+#### VTOrc flag `--allow-emergency-reparent`
+
+VTOrc has a new flag `--allow-emergency-reparent` that specifies whether VTOrc is allowed to run emergency
+failover operations. Users that want VTOrc to fix replication issues, but don't want it to run any failovers
+should use this flag. This flag defaults to `true` which corresponds to the default behavior from prior releases.
+
+#### VTOrc flag `--change-tablets-with-errant-gtid-to-drained`
+
+VTOrc has a new flag `--change-tablets-with-errant-gtid-to-drained` that allows users to choose whether VTOrc should change the
+tablet type of tablets with errant GTIDs to `DRAINED`. By default, this flag is disabled.
+
+This feature allows users to configure VTOrc such that any tablet that encounters errant GTIDs is automatically taken out of the
+serving graph. These tablets can then be inspected for what the errant GTIDs are, and once fixed, they can rejoin the cluster.
+
+#### ERS sub flag `--wait-for-all-tablets`
+
+vtctldclient command `EmergencyReparentShard` has a new sub-flag `--wait-for-all-tablets` that makes `EmergencyReparentShard` wait
+for a response from all the tablets. Originally `EmergencyReparentShard` was meant only to be run when a primary tablet is unreachable.
+We have realized now that there are cases when replication is broken but all tablets are reachable. In these cases, it is advisable to
+call `EmergencyReparentShard` with `--wait-for-all-tablets` so that it does not ignore any of the tablets.
+
+#### VTGate GRPC stream execute session flag `--grpc-send-session-in-streaming`
+
+This flag enables transaction support on VTGate's `StreamExecute` gRPC API.
+When this is enabled, `StreamExecute` will return the session in the last packet of the response.
+Users should enable this flag only after client code has been changed to expect such a packet.
+
+It is disabled by default.
+
+### Experimental Foreign Key Support
+
+A new optional field `foreignKeyMode` has been added to the VSchema. This field can be provided for each keyspace. The VTGate flag `--foreign_key_mode` has been deprecated in favor of this field.
+
+There are 3 foreign key modes now supported in Vitess -
+1. `unmanaged` -
+ This mode represents the default behavior in Vitess, where it does not manage foreign key column references. Users are responsible for configuring foreign keys in MySQL in such a way that related rows, as determined by foreign keys, reside within the same shard.
+2. `managed` [EXPERIMENTAL] -
+ In this experimental mode, Vitess is fully aware of foreign key relationships and actively tracks foreign key constraints using the schema tracker. VTGate will handle DML operations with foreign keys and correctly cascade updates and deletes.
+ It will also verify `restrict` constraints and validate the existence of parent rows before inserting child rows.
+ This ensures that all child operations are logged in binary logs, unlike the InnoDB implementation of foreign keys.
+ This allows the usage of VReplication workflows with foreign keys.
+ Implementation details are documented in the [RFC for foreign keys](https://github.com/vitessio/vitess/issues/12967).
+3. `disallow` -
+ In this mode Vitess explicitly disallows any DDL statements that try to create a foreign key constraint. This mode is equivalent to running VTGate with the flag `--foreign_key_mode=disallow`.
+
+#### Upgrade process
+
+After upgrading from v17 to v18, users should specify the correct foreign key mode for all their keyspaces in the VSchema using the new property.
+Once this change has taken effect, the deprecated flag `--foreign_key_mode` can be dropped from all VTGates. Note that this is only required if running in `disallow` mode.
+No action is needed to use `unmanaged` mode.
+
+### VTAdmin
+
+#### vtadmin-web updated to node v18.16.0 (LTS)
+
+Building `vtadmin-web` now requires node >= v18.16.0 (LTS). Breaking changes from v16 to v18 are listed
+in https://nodejs.org/en/blog/release/v18, but none apply to VTAdmin. Full details on node v18.16.0 are listed
+on https://nodejs.org/en/blog/release/v18.16.0.
+
+### Deprecations and Deletions
+
+#### Legacy Client Binaries
+
+`vtctldclient` is our new modern *Vitess controller daemon* (`vtctld`) *client* – which you will use to perform commands
+and take actions in your Vitess clusters. It is [replacing the legacy `vtctl`/`vtctlclient` binaries](https://vitess.io/docs/18.0/reference/vtctldclient-transition/overview/).
+Some of the benefits are:
+
+- [Dedicated RPCs for each command](https://github.com/vitessio/vitess/blob/release-18.0/proto/vtctlservice.proto#L32-L353)
+that are used between `vtctldclient` and `vtctld` – this offers clean separation of commands and makes it easier to
+develop new features without impacting other commands. This also presents an [API that other clients (both Vitess and
+3rd party) can use to interface with Vitess](https://vitess.io/blog/2023-04-17-vtctldserver-api/).
+- Use of modern frameworks: [`pFlag`](https://github.com/spf13/pflag#readme), [`Cobra`](https://cobra.dev), and [`Viper`](https://github.com/spf13/viper#readme).
+This makes development easier while also offering a better UX. For example, this offers a way to use
+[configuration files](https://vitess.io/docs/18.0/reference/viper/config-files/) with support for
+[dynamic configuration](https://vitess.io/docs/18.0/reference/viper/dynamic-values/) ([see also](https://github.com/vitessio/vitess/blob/release-18.0/doc/viper/viper.md)).
+- The [reference documentation](https://vitess.io/docs/18.0/reference/programs/vtctldclient/) is now built through code. This
+removes a burden from developers while helping users by ensuring the docs are always correct and up-to-date.
+
+In Vitess 18 we have completed migrating all client commands to `vtctldclient` – the last ones being the [OnlineDDL](https://github.com/vitessio/vitess/issues/13712)
+and [VReplication](https://github.com/vitessio/vitess/issues/12152) commands. With this work now completed, the
+legacy `vtctl`/`vtctlclient` binaries are now fully deprecated and we plan to remove them in Vitess 19. You should
+[begin your transition](https://vitess.io/docs/18.0/reference/vtctldclient-transition/) before upgrading to Vitess 18.
+
+#### Deprecated Command Line Flags
+
+Throttler related `vttablet` flags:
+
+- `--throttle_threshold` is deprecated and will be removed in `v19`
+- `--throttle_metrics_query` is deprecated and will be removed in `v19`
+- `--throttle_metrics_threshold` is deprecated and will be removed in `v19`
+- `--throttle_check_as_check_self` is deprecated and will be removed in `v19`
+- `--throttler-config-via-topo` is deprecated after assumed `true` in `v17`. It will be removed in a future version.
+
+Cache related `vttablet` flags:
+
+- `--queryserver-config-query-cache-lfu` is deprecated and will be removed in `v19`. The query cache always uses an LFU implementation now.
+- `--queryserver-config-query-cache-size` is deprecated and will be removed in `v19`. This option only applied to LRU caches, which are now unsupported.
+
+Buffering related `vtgate` flags:
+
+- `--buffer_implementation` is deprecated and will be removed in `v19`
+
+Cache related `vtgate` flags:
+
+- `--gate_query_cache_lfu` is deprecated and will be removed in `v19`. The query cache always uses an LFU implementation now.
+- `--gate_query_cache_size` is deprecated and will be removed in `v19`. This option only applied to LRU caches, which are now unsupported.
+
+VTGate flags:
+
+- `--schema_change_signal_user` is deprecated and will be removed in `v19`
+- `--foreign_key_mode` is deprecated and will be removed in `v19`. For more detail read the [foreign keys](#foreign-keys) section.
+
+VDiff v1:
+
+[VDiff v2 was added in Vitess 15](https://vitess.io/blog/2022-11-22-vdiff-v2/) and marked as GA in 16.
+The [legacy v1 client command](https://vitess.io/docs/18.0/reference/vreplication/vdiffv1/) is now deprecated in Vitess 18 and will be **removed** in Vitess 19.
+Please switch all of your usage to the [new VDiff client](https://vitess.io/docs/18.0/reference/vreplication/vdiff/) command ASAP.
+
+
+#### Deprecated Stats
+
+The following `EmergencyReparentShard` stats are deprecated in `v18` and will be removed in `v19`:
+- `ers_counter`
+- `ers_success_counter`
+- `ers_failure_counter`
+
+These metrics are replaced by [new reparenting stats introduced in `v18`](#vtctld-and-vtorc-reparenting-stats).
+
+VTBackup stat `DurationByPhase` is deprecated. Use the binary-valued `Phase` stat instead.
+
+#### Deleted Command Line Flags
+
+Flags in `vtcombo`:
+- `--vtctld_addr`
+
+Flags in `vtctldclient ApplySchema`:
+- `--skip-preflight`
+
+Flags in `vtctl ApplySchema`:
+- `--skip_preflight`
+
+Flags in `vtgate`:
+- `--vtctld_addr`
+
+Flags in `vttablet`:
+- `--vtctld_addr`
+- `--use_super_read_only`
+- `--disable-replication-manager`
+- `--init_populate_metadata`
+- `--queryserver-config-pool-prefill-parallelism`
+- `--queryserver-config-stream-pool-prefill-parallelism`
+- `--queryserver-config-transaction-pool-prefill-parallelism`
+- `--queryserver-config-schema-change-signal-interval`
+- `--enable-lag-throttler`
+
+Flags in `vtctld`:
+- `--vtctld_show_topology_crud`
+- `--durability_policy`
+
+Flags in `vtorc`:
+- `--lock-shard-timeout`
+- `--orc_web_dir`
+
+#### Deleted `v3` planner
+
+The `Gen4` planner has been the default planner since Vitess 14. The `v3` planner was deprecated in Vitess 15 and has been removed in Vitess 18.
+
+#### Deleted `k8stopo`
+
+`k8stopo` was deprecated in Vitess 17, see https://github.com/vitessio/vitess/issues/13298. It has now been removed.
+
+#### Deleted `vtgr`
+
+`vtgr` was deprecated in Vitess 17, see https://github.com/vitessio/vitess/issues/13300. It has now been removed.
+
+#### Deleted `query_analyzer`
+
+The undocumented `query_analyzer` binary has been removed in Vitess 18, see https://github.com/vitessio/vitess/issues/14054.
+
+### New stats
+
+#### VTGate Vindex unknown parameters
+
+The VTGate stat `VindexUnknownParameters` gauges unknown Vindex parameters found in the latest VSchema pulled from the topology.
+
+#### VTBackup `Phase` stat
+
+In v17, the `vtbackup` stat `DurationByPhase` was added to measure the time spent by `vtbackup` in each phase. This stat turned out to be awkward to use in production, and has been replaced in v18 by a binary-valued `Phase` stat.
+
+`Phase` reports a 1 (active) or a 0 (inactive) for each of the following phases:
+
+ * `CatchupReplication`
+ * `InitialBackup`
+ * `RestoreLastBackup`
+ * `TakeNewBackup`
+
+To calculate how long `vtbackup` has spent in a given phase, sum the 1-valued data points over time and multiply by the data collection or reporting interval. For example, in Prometheus:
+
+```
+sum_over_time(vtbackup_phase{phase="TakeNewBackup"}) * <collection or reporting interval>
+```
+#### VTBackup `PhaseStatus` stat
+
+`PhaseStatus` reports a 1 (active) or a 0 (inactive) for each of the following phases and statuses:
+
+ * `CatchupReplication` phase has statuses `Stalled` and `Stopped`.
+ * `Stalled` is set to `1` when replication stops advancing.
+ * `Stopped` is set to `1` when replication stops before `vtbackup` catches up with the primary.
+
+#### Backup and restore metrics for AWS S3
+
+Requests to AWS S3 are instrumented in backup and restore metrics. For example:
+
+```
+vtbackup_backup_count{component="BackupStorage",implementation="S3",operation="AWS:Request:Send"} 823
+vtbackup_backup_duration_nanoseconds{component="BackupStorage",implementation="S3",operation="AWS:Request:Send"} 1.33632421437e+11
+vtbackup_restore_count{component="BackupStorage",implementation="S3",operation="AWS:Request:Send"} 165
+vtbackup_restore_duration_nanoseconds{component="BackupStorage",implementation="S3",operation="AWS:Request:Send"} 1.09498802053e+11
+```
+
+#### VTCtld and VTOrc reparenting stats
+
+New VTCtld and VTOrc stats were added to measure frequency of reparents by keyspace/shard:
+- `emergency_reparent_counts` - Number of times `EmergencyReparentShard` has been run. It is further subdivided by the keyspace, shard and the result of the operation.
+- `planned_reparent_counts` - Number of times `PlannedReparentShard` has been run. It is further subdivided by the keyspace, shard and the result of the operation.
+
+Also, the `reparent_shard_operation_timings` stat was added to provide per-operation timings of reparent operations.
+
+### VTTablet
+
+#### New ResetSequences rpc
+
+A new VTTablet RPC `ResetSequences` has been added, which is being used by `MoveTables` and `Migrate` for workflows
+where a `sequence` table is being moved (https://github.com/vitessio/vitess/pull/13238). This has an impact on the
+Vitess upgrade process from an earlier version if you need to use such a workflow before the entire cluster is upgraded.
+
+Any MoveTables or Migrate workflow that moves a sequence table should only be run after all vitess components have been
+upgraded, and no upgrade should be done while such a workflow is in progress.
+
+#### New Dry-run/monitoring-only mode for the transaction throttler
+
+A new CLI flag `--tx-throttler-dry-run` to set the Transaction Throttler to monitoring-only/dry-run mode has been added.
+If the transaction throttler is enabled with `--enable-tx-throttler` and the new dry-run flag is also specified, the
+tablet will not actually throttle any transactions; however, it will increase the counters for transactions throttled
+(`vttablet_transaction_throttler_throttled`). This allows users to deploy the transaction throttler in production and
+gain observability on how much throttling would take place, without actually throttling any requests.
+
+### Docker Builds
+
+#### Bookworm added and made default
+
+Bookworm was released on 2023-06-10, and will be the new default base container for Docker builds.
+Bullseye images will still be built and available as long as the OS build is current, tagged with the `-bullseye` suffix.
+
+#### Buster removed
+
+Buster LTS support will stop in June 2024, and Vitess 18 will be supported through October 2024.
+To prevent supporting a deprecated buster build for several months after June 2024, we are preemptively
+removing Vitess support for Buster.
+
+### Durability Policies
+
+#### New Durability Policies
+
+Two new built-in durability policies have been added in Vitess 18: `semi_sync_with_rdonly_ack` and `cross_cell_with_rdonly_ack`.
+These policies are similar to `semi_sync` and `cross_cell` respectively, the only difference is that `rdonly` tablets can also send semi-sync ACKs.
+------------
+The entire changelog for this release can be found [here](https://github.com/vitessio/vitess/blob/main/changelog/18.0/18.0.0/changelog.md).
+
+The release includes 420 merged Pull Requests.
+
+Thanks to all our contributors: @GuptaManan100, @Juneezee, @L3o-pold, @adsr, @ajm188, @app/dependabot, @app/github-actions, @app/vitess-bot, @arvind-murty, @austenLacy, @brendar, @davidpiegza, @dbussink, @deepthi, @derekperkins, @ejortegau, @frouioui, @harshit-gangal, @hkdsun, @jfg956, @jspawar, @mattlord, @maxenglander, @mdlayher, @notfelineit, @olyazavr, @pbibra, @peterlyoo, @rafer, @rohit-nayak-ps, @shlomi-noach, @systay, @timvaillancourt, @vmg, @yields
+
diff --git a/changelog/18.0/18.0.0/summary.md b/changelog/18.0/18.0.0/summary.md
new file mode 100644
index 00000000000..eb2b6692201
--- /dev/null
+++ b/changelog/18.0/18.0.0/summary.md
@@ -0,0 +1,318 @@
+## Summary
+
+### Table of Contents
+
+- **[Major Changes](#major-changes)**
+ - **[Breaking Changes](#breaking-changes)**
+ - [Local examples now use etcd v3 storage and API](#local-examples-etcd-v3)
+ - **[New command line flags and behavior](#new-flag)**
+ - [VTOrc flag `--allow-emergency-reparent`](#new-flag-toggle-ers)
+ - [VTOrc flag `--change-tablets-with-errant-gtid-to-drained`](#new-flag-errant-gtid-convert)
+ - [ERS sub flag `--wait-for-all-tablets`](#new-ers-subflag)
+ - [VTGate flag `--grpc-send-session-in-streaming`](#new-vtgate-streaming-sesion)
+ - **[Experimental Foreign Key Support](#foreign-keys)**
+ - **[VTAdmin](#vtadmin)**
+ - [Updated to node v18.16.0](#update-node)
+ - **[Deprecations and Deletions](#deprecations-and-deletions)**
+ - [Legacy Client Binaries](#legacy-client-binaries)
+ - [Deprecated Flags](#deprecated-flags)
+ - [Deprecated Stats](#deprecated-stats)
+ - [Deleted Flags](#deleted-flags)
+ - [Deleted `V3` planner](#deleted-v3)
+ - [Deleted `k8stopo`](#deleted-k8stopo)
+ - [Deleted `vtgr`](#deleted-vtgr)
+ - [Deleted `query_analyzer`](#deleted-query_analyzer)
+ - **[New Stats](#new-stats)**
+ - [VTGate Vindex unknown parameters](#vtgate-vindex-unknown-parameters)
+ - [VTBackup stat `Phase`](#vtbackup-stat-phase)
+ - [VTBackup stat `PhaseStatus`](#vtbackup-stat-phase-status)
+ - [Backup and restore metrics for AWS S3](#backup-restore-metrics-aws-s3)
+ - [VTCtld and VTOrc reparenting stats](#vtctld-and-vtorc-reparenting-stats)
+ - **[VTTablet](#vttablet)**
+ - [VTTablet: New ResetSequences RPC](#vttablet-new-rpc-reset-sequences)
+ - **[Docker](#docker)**
+ - [Debian: Bookworm added and made default](#debian-bookworm)
+ - [Debian: Buster removed](#debian-buster)
+ - **[Durability Policies](#durability-policies)**
+ - [New Durability Policies](#new-durability-policies)
+
+## Major Changes
+
+### Breaking Changes
+
+#### Local examples now use etcd v3 storage and API
+In previous releases the [local examples](https://github.com/vitessio/vitess/tree/main/examples/local) were
+explicitly using etcd v2 storage (`etcd --enable-v2=true`) and API (`ETCDCTL_API=2`) mode. We have now
+removed this legacy etcd usage and instead use the new (default) etcd v3 storage and API. Please see
+[PR #13791](https://github.com/vitessio/vitess/pull/13791) for details. If you are using the local
+examples in any sort of long-term non-testing capacity, then you will need to explicitly use the v2 storage
+and API mode or [migrate your existing data from v2 to v3](https://etcd.io/docs/v3.5/tutorials/how-to-migrate/).
+
+### New command line flags and behavior
+
+#### VTOrc flag `--allow-emergency-reparent`
+
+VTOrc has a new flag `--allow-emergency-reparent` that specifies whether VTOrc is allowed to run emergency
+failover operations. Users that want VTOrc to fix replication issues, but don't want it to run any failovers
+should use this flag. This flag defaults to `true` which corresponds to the default behavior from prior releases.
+
+#### VTOrc flag `--change-tablets-with-errant-gtid-to-drained`
+
+VTOrc has a new flag `--change-tablets-with-errant-gtid-to-drained` that allows users to choose whether VTOrc should change the
+tablet type of tablets with errant GTIDs to `DRAINED`. By default, this flag is disabled.
+
+This feature allows users to configure VTOrc such that any tablet that encounters errant GTIDs is automatically taken out of the
+serving graph. These tablets can then be inspected for what the errant GTIDs are, and once fixed, they can rejoin the cluster.
+
+#### ERS sub flag `--wait-for-all-tablets`
+
+vtctldclient command `EmergencyReparentShard` has a new sub-flag `--wait-for-all-tablets` that makes `EmergencyReparentShard` wait
+for a response from all the tablets. Originally `EmergencyReparentShard` was meant only to be run when a primary tablet is unreachable.
+We have realized now that there are cases when replication is broken but all tablets are reachable. In these cases, it is advisable to
+call `EmergencyReparentShard` with `--wait-for-all-tablets` so that it does not ignore any of the tablets.
+
+#### VTGate GRPC stream execute session flag `--grpc-send-session-in-streaming`
+
+This flag enables transaction support on VTGate's `StreamExecute` gRPC API.
+When this is enabled, `StreamExecute` will return the session in the last packet of the response.
+Users should enable this flag only after client code has been changed to expect such a packet.
+
+It is disabled by default.
+
+### Experimental Foreign Key Support
+
+A new optional field `foreignKeyMode` has been added to the VSchema. This field can be provided for each keyspace. The VTGate flag `--foreign_key_mode` has been deprecated in favor of this field.
+
+There are 3 foreign key modes now supported in Vitess -
+1. `unmanaged` -
+ This mode represents the default behavior in Vitess, where it does not manage foreign key column references. Users are responsible for configuring foreign keys in MySQL in such a way that related rows, as determined by foreign keys, reside within the same shard.
+2. `managed` [EXPERIMENTAL] -
+ In this experimental mode, Vitess is fully aware of foreign key relationships and actively tracks foreign key constraints using the schema tracker. VTGate will handle DML operations with foreign keys and correctly cascade updates and deletes.
+ It will also verify `restrict` constraints and validate the existence of parent rows before inserting child rows.
+ This ensures that all child operations are logged in binary logs, unlike the InnoDB implementation of foreign keys.
+ This allows the usage of VReplication workflows with foreign keys.
+ Implementation details are documented in the [RFC for foreign keys](https://github.com/vitessio/vitess/issues/12967).
+3. `disallow` -
+ In this mode Vitess explicitly disallows any DDL statements that try to create a foreign key constraint. This mode is equivalent to running VTGate with the flag `--foreign_key_mode=disallow`.
+
+#### Upgrade process
+
+After upgrading from v17 to v18, users should specify the correct foreign key mode for all their keyspaces in the VSchema using the new property.
+Once this change has taken effect, the deprecated flag `--foreign_key_mode` can be dropped from all VTGates. Note that this is only required if running in `disallow` mode.
+No action is needed to use `unmanaged` mode.
+
+### VTAdmin
+
+#### vtadmin-web updated to node v18.16.0 (LTS)
+
+Building `vtadmin-web` now requires node >= v18.16.0 (LTS). Breaking changes from v16 to v18 are listed
+in https://nodejs.org/en/blog/release/v18, but none apply to VTAdmin. Full details on node v18.16.0 are listed
+on https://nodejs.org/en/blog/release/v18.16.0.
+
+### Deprecations and Deletions
+
+#### Legacy Client Binaries
+
+`vtctldclient` is our new modern *Vitess controller daemon* (`vtctld`) *client* – which you will use to perform commands
+and take actions in your Vitess clusters. It is [replacing the legacy `vtctl`/`vtctlclient` binaries](https://vitess.io/docs/18.0/reference/vtctldclient-transition/overview/).
+Some of the benefits are:
+
+- [Dedicated RPCs for each command](https://github.com/vitessio/vitess/blob/release-18.0/proto/vtctlservice.proto#L32-L353)
+that are used between `vtctldclient` and `vtctld` – this offers clean separation of commands and makes it easier to
+develop new features without impacting other commands. This also presents an [API that other clients (both Vitess and
+3rd party) can use to interface with Vitess](https://vitess.io/blog/2023-04-17-vtctldserver-api/).
+- Use of modern frameworks: [`pFlag`](https://github.com/spf13/pflag#readme), [`Cobra`](https://cobra.dev), and [`Viper`](https://github.com/spf13/viper#readme).
+This makes development easier while also offering a better UX. For example, this offers a way to use
+[configuration files](https://vitess.io/docs/18.0/reference/viper/config-files/) with support for
+[dynamic configuration](https://vitess.io/docs/18.0/reference/viper/dynamic-values/) ([see also](https://github.com/vitessio/vitess/blob/release-18.0/doc/viper/viper.md)).
+- The [reference documentation](https://vitess.io/docs/18.0/reference/programs/vtctldclient/) is now built through code. This
+removes a burden from developers while helping users by ensuring the docs are always correct and up-to-date.
+
+In Vitess 18 we have completed migrating all client commands to `vtctldclient` – the last ones being the [OnlineDDL](https://github.com/vitessio/vitess/issues/13712)
+and [VReplication](https://github.com/vitessio/vitess/issues/12152) commands. With this work now completed, the
+legacy `vtctl`/`vtctlclient` binaries are now fully deprecated and we plan to remove them in Vitess 19. You should
+[begin your transition](https://vitess.io/docs/18.0/reference/vtctldclient-transition/) before upgrading to Vitess 18.
+
+#### Deprecated Command Line Flags
+
+Throttler related `vttablet` flags:
+
+- `--throttle_threshold` is deprecated and will be removed in `v19`
+- `--throttle_metrics_query` is deprecated and will be removed in `v19`
+- `--throttle_metrics_threshold` is deprecated and will be removed in `v19`
+- `--throttle_check_as_check_self` is deprecated and will be removed in `v19`
+- `--throttler-config-via-topo` is deprecated after assumed `true` in `v17`. It will be removed in a future version.
+
+Cache related `vttablet` flags:
+
+- `--queryserver-config-query-cache-lfu` is deprecated and will be removed in `v19`. The query cache always uses an LFU implementation now.
+- `--queryserver-config-query-cache-size` is deprecated and will be removed in `v19`. This option only applied to LRU caches, which are now unsupported.
+
+Buffering related `vtgate` flags:
+
+- `--buffer_implementation` is deprecated and will be removed in `v19`
+
+Cache related `vtgate` flags:
+
+- `--gate_query_cache_lfu` is deprecated and will be removed in `v19`. The query cache always uses an LFU implementation now.
+- `--gate_query_cache_size` is deprecated and will be removed in `v19`. This option only applied to LRU caches, which are now unsupported.
+
+VTGate flags:
+
+- `--schema_change_signal_user` is deprecated and will be removed in `v19`
+- `--foreign_key_mode` is deprecated and will be removed in `v19`. For more detail read the [foreign keys](#foreign-keys) section.
+
+VDiff v1:
+
+[VDiff v2 was added in Vitess 15](https://vitess.io/blog/2022-11-22-vdiff-v2/) and marked as GA in 16.
+The [legacy v1 client command](https://vitess.io/docs/18.0/reference/vreplication/vdiffv1/) is now deprecated in Vitess 18 and will be **removed** in Vitess 19.
+Please switch all of your usage to the [new VDiff client](https://vitess.io/docs/18.0/reference/vreplication/vdiff/) command ASAP.
+
+
+#### Deprecated Stats
+
+The following `EmergencyReparentShard` stats are deprecated in `v18` and will be removed in `v19`:
+- `ers_counter`
+- `ers_success_counter`
+- `ers_failure_counter`
+
+These metrics are replaced by [new reparenting stats introduced in `v18`](#vtctld-and-vtorc-reparenting-stats).
+
+VTBackup stat `DurationByPhase` is deprecated. Use the binary-valued `Phase` stat instead.
+
+#### Deleted Command Line Flags
+
+Flags in `vtcombo`:
+- `--vtctld_addr`
+
+Flags in `vtctldclient ApplySchema`:
+- `--skip-preflight`
+
+Flags in `vtctl ApplySchema`:
+- `--skip_preflight`
+
+Flags in `vtgate`:
+- `--vtctld_addr`
+
+Flags in `vttablet`:
+- `--vtctld_addr`
+- `--use_super_read_only`
+- `--disable-replication-manager`
+- `--init_populate_metadata`
+- `--queryserver-config-pool-prefill-parallelism`
+- `--queryserver-config-stream-pool-prefill-parallelism`
+- `--queryserver-config-transaction-pool-prefill-parallelism`
+- `--queryserver-config-schema-change-signal-interval`
+- `--enable-lag-throttler`
+
+Flags in `vtctld`:
+- `--vtctld_show_topology_crud`
+- `--durability_policy`
+
+Flags in `vtorc`:
+- `--lock-shard-timeout`
+- `--orc_web_dir`
+
+#### Deleted `v3` planner
+
+The `Gen4` planner has been the default planner since Vitess 14. The `v3` planner was deprecated in Vitess 15 and has been removed in Vitess 18.
+
+#### Deleted `k8stopo`
+
+`k8stopo` was deprecated in Vitess 17, see https://github.com/vitessio/vitess/issues/13298. It has now been removed.
+
+#### Deleted `vtgr`
+
+`vtgr` was deprecated in Vitess 17, see https://github.com/vitessio/vitess/issues/13300. It has now been removed.
+
+#### Deleted `query_analyzer`
+
+The undocumented `query_analyzer` binary has been removed in Vitess 18, see https://github.com/vitessio/vitess/issues/14054.
+
+### New stats
+
+#### VTGate Vindex unknown parameters
+
+The VTGate stat `VindexUnknownParameters` gauges unknown Vindex parameters found in the latest VSchema pulled from the topology.
+
+#### VTBackup `Phase` stat
+
+In v17, the `vtbackup` stat `DurationByPhase` was added to measure the time spent by `vtbackup` in each phase. This stat turned out to be awkward to use in production, and has been replaced in v18 by a binary-valued `Phase` stat.
+
+`Phase` reports a 1 (active) or a 0 (inactive) for each of the following phases:
+
+ * `CatchupReplication`
+ * `InitialBackup`
+ * `RestoreLastBackup`
+ * `TakeNewBackup`
+
+To calculate how long `vtbackup` has spent in a given phase, sum the 1-valued data points over time and multiply by the data collection or reporting interval. For example, in Prometheus:
+
+```
+sum_over_time(vtbackup_phase{phase="TakeNewBackup"}) * <collection or reporting interval>
+```
+#### VTBackup `PhaseStatus` stat
+
+`PhaseStatus` reports a 1 (active) or a 0 (inactive) for each of the following phases and statuses:
+
+ * `CatchupReplication` phase has statuses `Stalled` and `Stopped`.
+ * `Stalled` is set to `1` when replication stops advancing.
+ * `Stopped` is set to `1` when replication stops before `vtbackup` catches up with the primary.
+
+#### Backup and restore metrics for AWS S3
+
+Requests to AWS S3 are instrumented in backup and restore metrics. For example:
+
+```
+vtbackup_backup_count{component="BackupStorage",implementation="S3",operation="AWS:Request:Send"} 823
+vtbackup_backup_duration_nanoseconds{component="BackupStorage",implementation="S3",operation="AWS:Request:Send"} 1.33632421437e+11
+vtbackup_restore_count{component="BackupStorage",implementation="S3",operation="AWS:Request:Send"} 165
+vtbackup_restore_duration_nanoseconds{component="BackupStorage",implementation="S3",operation="AWS:Request:Send"} 1.09498802053e+11
+```
+
+#### VTCtld and VTOrc reparenting stats
+
+New VTCtld and VTOrc stats were added to measure frequency of reparents by keyspace/shard:
+- `emergency_reparent_counts` - Number of times `EmergencyReparentShard` has been run. It is further subdivided by the keyspace, shard and the result of the operation.
+- `planned_reparent_counts` - Number of times `PlannedReparentShard` has been run. It is further subdivided by the keyspace, shard and the result of the operation.
+
+Also, the `reparent_shard_operation_timings` stat was added to provide per-operation timings of reparent operations.
+
+### VTTablet
+
+#### New ResetSequences rpc
+
+A new VTTablet RPC `ResetSequences` has been added, which is being used by `MoveTables` and `Migrate` for workflows
+where a `sequence` table is being moved (https://github.com/vitessio/vitess/pull/13238). This has an impact on the
+Vitess upgrade process from an earlier version if you need to use such a workflow before the entire cluster is upgraded.
+
+Any MoveTables or Migrate workflow that moves a sequence table should only be run after all vitess components have been
+upgraded, and no upgrade should be done while such a workflow is in progress.
+
+#### New Dry-run/monitoring-only mode for the transaction throttler
+
+A new CLI flag `--tx-throttler-dry-run` to set the Transaction Throttler to monitoring-only/dry-run mode has been added.
+If the transaction throttler is enabled with `--enable-tx-throttler` and the new dry-run flag is also specified, the
+tablet will not actually throttle any transactions; however, it will increase the counters for transactions throttled
+(`vttablet_transaction_throttler_throttled`). This allows users to deploy the transaction throttler in production and
+gain observability on how much throttling would take place, without actually throttling any requests.
+
+### Docker Builds
+
+#### Bookworm added and made default
+
+Bookworm was released on 2023-06-10, and will be the new default base container for Docker builds.
+Bullseye images will still be built and available as long as the OS build is current, tagged with the `-bullseye` suffix.
+
+#### Buster removed
+
+Buster LTS support will stop in June 2024, and Vitess 18 will be supported through October 2024.
+To prevent supporting a deprecated buster build for several months after June 2024, we are preemptively
+removing Vitess support for Buster.
+
+### Durability Policies
+
+#### New Durability Policies
+
+Two new built-in durability policies have been added in Vitess 18: `semi_sync_with_rdonly_ack` and `cross_cell_with_rdonly_ack`.
+These policies are similar to `semi_sync` and `cross_cell` respectively, the only difference is that `rdonly` tablets can also send semi-sync ACKs.
\ No newline at end of file
diff --git a/changelog/18.0/README.md b/changelog/18.0/README.md
new file mode 100644
index 00000000000..97676dc7e39
--- /dev/null
+++ b/changelog/18.0/README.md
@@ -0,0 +1,4 @@
+## v18.0
+* **[18.0.0](18.0.0)**
+ * [Changelog](18.0.0/changelog.md)
+ * [Release Notes](18.0.0/release_notes.md)
diff --git a/changelog/README.md b/changelog/README.md
index 97c338f9942..ffb8d698b28 100644
--- a/changelog/README.md
+++ b/changelog/README.md
@@ -1,4 +1,5 @@
## Releases
+* [18.0](18.0)
* [17.0](17.0)
* [16.0](16.0)
* [15.0](15.0)
diff --git a/config/user.json b/config/user.json
index 28fba88b3c9..773f1b3156e 100644
--- a/config/user.json
+++ b/config/user.json
@@ -40,7 +40,7 @@
"KeySpaces": [
{"Name":"*", "WhiteIPs": []}
],
- "Privilege": 1,
+ "Privilege": 255,
"ReadRole": 1
},
"mysql_rs": {
diff --git a/dev.env b/dev.env
index 7426dde45f2..b90ef7eed40 100644
--- a/dev.env
+++ b/dev.env
@@ -25,9 +25,6 @@ source ./build.env
export VTPORTSTART=6700
-# Add chromedriver to path for Selenium tests.
-PATH=$(prepend_path "$PATH" "$VTROOT/dist/chromedriver")
-
# Node path.
PATH=$(prepend_path "$PATH" "$VTROOT/dist/node/bin")
export PATH
diff --git a/doc/VIT-03-report-security-audit.pdf b/doc/VIT-03-report-security-audit.pdf
new file mode 100644
index 00000000000..500ee693377
Binary files /dev/null and b/doc/VIT-03-report-security-audit.pdf differ
diff --git a/doc/design-docs/VTGateBuffering.md b/doc/design-docs/VTGateBuffering.md
new file mode 100644
index 00000000000..9155929ea49
--- /dev/null
+++ b/doc/design-docs/VTGateBuffering.md
@@ -0,0 +1,63 @@
+# Adding buffering to VTGate while switching traffic during a movetables operation
+
+## Current buffering support in VTGate
+
+VTGate currently supports buffering of queries during reparenting and resharding operations. This is done by buffering
+the failing queries in the tablet gateway layer in vtgate. When a query fails, the reason for the failure is checked to
+see if it is due to one of these.
+
+To assist in diagnosing the root cause a _KeyspaceEventWatcher_ (aka *KEW*) was introduced. This watches the
+SrvKeyspace (in a goroutine): if there is a change to the keyspace partitions in the topo it is considered that there is
+a resharding operation in progress. The buffering logic subscribes to the keyspace event watcher.
+
+Otherwise, if there are no tablets to serve from, based on the health check results, it is assumed that there is a
+cluster event where either the primary is being reparented or if the cluster is being restarted and all tablets are in
+the process of starting up.
+
+If either of these occurs, the _consistent_ flag is set to false for that keyspace. When that happens the keyspace
+watcher checks, on every SrvKeyspace update, if the event has got resolved. This can happen when tablets are now
+available (in case of a cluster event) or if the partition information indicates that resharding is complete.
+
+When that happens, the keyspace event watcher publishes an event that the keyspace is now consistent. The buffers are
+then drained and the queries retried by the tablet gateway.
+
+## Adding buffering support for MoveTables
+
+### Background
+
+MoveTables does not affect the entire keyspace, just the tables being moved. Even if all tables are being moved there is
+no change in existing keyspace or shard configurations. So the KEW doesn't detect a cluster event since the tablets are
+still available and shard partitions are unchanged.
+
+MoveTables moves tables from one keyspace to another. There are two flavors of MoveTables: a regular one, where the
+tables are moved into all shards in the target keyspace, and Shard-By-Shard Migration, where the user can specify a
+subset of shards to move the tables into.
+
+These are the topo attributes that are affected during a MoveTables (regular or shard-by-shard):
+
+* *DeniedTables* in a shard's TabletControls. These are used to stop writes to the source keyspace for these tables.
+ While switching writes we first create these entries, wait for the target to catchup to the source (using gtid
+ positions), and then update the routing rules to point these tables to the target. When a primary sees a DeniedTables
+ entry during a DML it will error with an "enforce denied tables".
+* *RoutingRules* (for regular movetables) and *ShardRoutingRules* (for shard by shard migration). Routing rules are
+ pointers for each table being moved to a keyspace. When a MoveTables is initiated, that keyspace is the source
+ keyspace. After traffic is switched the pointer is changed to point to the target keyspace. If routing rules are
+  specified, VTGate uses them to decide which keyspace to route each table to.
+
+### Changes
+
+There are two main changes:
+
+* The keyspace event watcher is enhanced to look at the topo attributes mentioned above. An SrvVSchema watcher looks for
+ changes in the Routing Rules. DeniedTables are only in the Shard records in the topo. So any changes to the
+ DeniedTables would not result in a notification. To get around that we change the traffic switcher to also rebuild
+ SrvVSchema when DeniedTables are modified.
+* The logic to start buffering needs to look for the "enforce denied tables" error that is thrown by the vttablets when
+ it tries to execute a query on a table being switched.
+* We cannot use the current query retry logic which is at the tablet gateway level: meaning the keyspace is already
+ fixed by the planner and cannot be changed in that layer. We need to add a new retry logic at a higher level (the
+ _newExecute_ method) and always replan before retrying a query. This also means that we need to bypass the plan cache
+ while retrying.
+
+
+
diff --git a/doc/internal/Overview.md b/doc/internal/README.md
similarity index 61%
rename from doc/internal/Overview.md
rename to doc/internal/README.md
index e1cb74e4ddd..7ed4950e877 100644
--- a/doc/internal/Overview.md
+++ b/doc/internal/README.md
@@ -1,5 +1,7 @@
# Internal Documentation
-The documents in this category document internal processes which are taken care of by the Vitess Team e.g. re-publishing the website [vitess.io](https://vitess.io) or creating a new release.
+The documents in this category document internal processes which are taken care of by the Vitess Team e.g. creating a new release.
We have put them here to increase transparency and make it easy for others to follow and improve processes.
+
+- [**Release**](./release/README.md)
\ No newline at end of file
diff --git a/doc/internal/.images/post-release-01.png b/doc/internal/release/.images/post-release-01.png
similarity index 100%
rename from doc/internal/.images/post-release-01.png
rename to doc/internal/release/.images/post-release-01.png
diff --git a/doc/internal/.images/release-01.png b/doc/internal/release/.images/release-01.png
similarity index 100%
rename from doc/internal/.images/release-01.png
rename to doc/internal/release/.images/release-01.png
diff --git a/doc/internal/.images/release-02.png b/doc/internal/release/.images/release-02.png
similarity index 100%
rename from doc/internal/.images/release-02.png
rename to doc/internal/release/.images/release-02.png
diff --git a/doc/internal/.images/release-03.png b/doc/internal/release/.images/release-03.png
similarity index 100%
rename from doc/internal/.images/release-03.png
rename to doc/internal/release/.images/release-03.png
diff --git a/doc/internal/.images/release-04.png b/doc/internal/release/.images/release-04.png
similarity index 100%
rename from doc/internal/.images/release-04.png
rename to doc/internal/release/.images/release-04.png
diff --git a/doc/internal/release/README.md b/doc/internal/release/README.md
new file mode 100644
index 00000000000..8f593ee9e66
--- /dev/null
+++ b/doc/internal/release/README.md
@@ -0,0 +1,13 @@
+# Release Instructions
+
+This page describes the steps for cutting a new [open source release](https://github.com/vitessio/vitess/releases).
+
+### Summary
+
+- [How to Release](./how-to-release.md)
+- [Versioning](./versioning.md)
+- [Release Branches](./release-branches.md)
+- [Release Tags](./release-tags.md)
+- [Docker Images](./docker-images.md)
+- [Java Packages](./java-packages.md)
+- [End Of Life Process](./eol-process.md)
diff --git a/doc/internal/release/docker-images.md b/doc/internal/release/docker-images.md
new file mode 100644
index 00000000000..75941ca6309
--- /dev/null
+++ b/doc/internal/release/docker-images.md
@@ -0,0 +1,3 @@
+# Docker Images
+
+Docker images are built automatically on DockerHub and can be found [here](https://hub.docker.com/repository/docker/vitess/lite/).
diff --git a/doc/internal/release/eol-process.md b/doc/internal/release/eol-process.md
new file mode 100644
index 00000000000..f1d2a343d0f
--- /dev/null
+++ b/doc/internal/release/eol-process.md
@@ -0,0 +1,12 @@
+# End-of-Life Process
+
+The lifespan of a major version is one year long; after that time, the version has reached its end-of-life.
+To properly deprecate a major version of Vitess, follow these steps:
+
+- **Update the website documentation**
+ > - In the ['Releases' documentation](https://vitess.io/docs/releases/), the EOL version must be moved under the ['Archived Releases' section](https://vitess.io/docs/releases/#archived-releases).
+ > - The sidebar of the website must be changed. We need to remove the EOL version from it. To do so, we move the version folder onto the `archive` folder.
+- **Delete the `Backport To: ...` label**
+  > - Delete the corresponding label for the EOL version; we do not want to encourage any more backports to the EOL release branch.
+- **Make proper announcement on Slack**
+ > - Notify the community of this deprecation.
\ No newline at end of file
diff --git a/doc/internal/ReleaseInstructions.md b/doc/internal/release/how-to-release.md
similarity index 66%
rename from doc/internal/ReleaseInstructions.md
rename to doc/internal/release/how-to-release.md
index 80ad0d38458..450127bd869 100644
--- a/doc/internal/ReleaseInstructions.md
+++ b/doc/internal/release/how-to-release.md
@@ -1,155 +1,53 @@
-# Release Instructions
+# Release Cutover
-This page describes the steps for cutting a new [open source release](https://github.com/vitessio/vitess/releases).
+In this section we describe our current release process. Below is a summary of this document.
-### Summary
+- [**Pre-requisite for the release team**](#pre-requisites)
+- [**Overview**](#overview)
+- [**Pre-Release**](#pre-release)
+- [**Release**](#release)
+- [**Post-Release**](#post-release)
+- [**How To prepare the release of Vitess**](#how-to-prepare-the-release-of-vitess)
+- [**How To Release Vitess**](#how-to-release-vitess)
+- [**How To Code Freeze**](#how-to-code-freeze)
+- [**How To Merge During Code Freeze**](#how-to-merge-during-code-freeze)
+- [**Java Packages: Deploy & Release**](#java-packages-deploy--release)
-- [Versioning](#versioning)
-- [Release Branches](#release-branches)
-- [Release Tags](#release-tags)
-- [Docker Images](#docker-images)
-- [Java Packages](#java-packages)
-- [Release Cutover](#release-cutover)
--------
+-----
-## Versioning
-
-Our versioning strategy is based on [VEP5](https://github.com/vitessio/enhancements/blob/main/veps/vep-5.md).
-
-### Major Release (vX)
-
-A new major release is needed when the public API changes in a
-backward-incompatible way -- for example, when removing deprecated interfaces.
-
-Our public API includes (but is not limited to):
-
-* The VTGate [RPC interfaces](https://github.com/vitessio/vitess/tree/main/proto).
-* The interfaces exposed by the VTGate client library in each language.
-
-Care must also be taken when changing the format of any data stored by a live
-system, such as topology data or Vitess-internal tables (used for sequences,
-distributed transactions, etc.). Although this data is considered as internal to
-Vitess, if any change breaks the upgrade path for a live system (for example,
-requiring that it be shut down and reinitialized from scratch), then it must be
-considered as a breaking change.
-
-### Minor Release (vX.Y)
-
-A new minor release indicates that functionality has been added or changed in a
-backward-compatible way. This should be the majority of normal releases.
-
-### Patch Release (vX.Y.Z)
-
-A patch release indicates that only a select set of bugfixes have been
-cherry-picked onto the associated minor release. The expectation is that
-upgrading by a patch release should be painless (not requiring any config
-changes) and safe (isolated from active development on `main`).
-
-### Pre-Release Labels (vX.Y.Z-labelN)
-
-Pre-release versions should be labeled with a suffix like `-beta2` or `-rc1`.
-
--------
-
-## Release Branches
-
-Each major and minor releases (X.Y) should have a [release branch](https://github.com/vitessio/vitess/branches/all?query=release) named
-`release-X.Y`. This branch should diverge from `main` when the release
-is declared, after which point only bugfix PRs should be cherry-picked onto the branch.
-All other activity on `main` will go out with a subsequent major or minor release.
-
-```shell
-git checkout main
-git pull --ff-only upstream main
-
-git checkout -b release-X.Y
-git push upstream release-X.Y
-```
-
-The branches are named `release-X.Y` to distinguish them from point-in-time
-tags, which are named `vX.Y.Z`.
-
--------
-
-## Release Tags
-
-While the release branch is a moving target, release tags mark point-in-time
-snapshots of the repository. Essentially, a tag assigns a human-readable name to
-a specific Git commit hash. Although it's technically possible to reassign a tag
-name to a different hash, we must never do this.
-
--------
-
-## Docker Images
-
-Docker images built automatically on DockerHub and can be found [here](https://hub.docker.com/repository/docker/vitess/lite/).
-
--------
-
-## Java Packages
-
-We publish binary packages for our [JDBC driver and Java client on Maven Central](https://search.maven.org/#search|ga|1|g:"io.vitess").
-
-To do so, we use the http://oss.sonatype.org/ repository.
-New packages must be uploaded there ("deployed") and will be automatically published ("released").
-Once they are released there, they will be automatically synchronized with Maven Central.
-The synchronization takes only several minutes, but the update on http://search.maven.org may take up to two hours.
-
-### Access to oss.sonatype.org
-
-[Sign up here.](https://issues.sonatype.org/secure/Signup!default.jspa)
-Then you must be added as member to our `io.vitess` namespace.
-Therefore, file a JIRA ticket with Sonatype to get added ([example for a different namespace](https://issues.sonatype.org/browse/OSSRH-30604)).
-
-### One-time setup
-
-#### Set up GPG
-
-Follow [Sonatype's GPG instructions](https://central.sonatype.org/pages/working-with-pgp-signatures.html).
-
-Install `gpg-agent` (needed below) e.g. on Ubuntu via: `sudo apt-get install gnupg-agent`.
-for Mac you need to install 'gnupg' via 'brew install gnupg'
-
-#### Login configuration
-
-Create the `settings.xml` in the `$HOME/.m2/` directory as described in their [instructions](https://central.sonatype.org/pages/apache-maven.html).
-
--------
-
-## Release Cutover
-
-In this section we describe our current release process. We begin with a list of [**pre-requisite for the release team**](#pre-requisites) and with a short [**overview**](#overview).
-The release process is divided into three parts: [**Pre-Release**](#pre-release), [**Release**](#release), [**Post-Release**](#post-release), which are detailed after the overview.
-
-### Pre-Requisites
+## Pre-Requisites
This section highlights the different pre-requisites the release team has to meet before releasing.
- The tool `gh` must be installed locally and ready to be used.
-- You must have access to the Java release, more information in the [**Java Packages**](#java-packages) section.
+- You must have access to the Java release, more information in the [**Java Packages**](./java-packages.md) section.
- You must be able to create branches and have admin right on the `vitessio/vitess` and `planetscale/vitess-operator` repositories.
-### Overview
+-----
-#### Schedule
+## Overview
+
+### Schedule
A new major version of Vitess is released every four months. For each major version there is at least one release candidate, which we release three weeks before the GA version.
We usually create the RC1 during the first week of the month, and the GA version three weeks later.
-#### Code Freeze
+### Code Freeze
Before creating RC1, there is a code freeze. Assuming the release of RC1 happens on a Tuesday, the release branch will be frozen Friday of the previous week.
This allows us to test that the release branch can be released and avoid discovering unwanted events during the release day. Once the RC1 is released, there are three more weeks to backport bug fixes into the release branches.
However, we also proceed to a code freeze the Friday before the GA release. (Assuming GA is on a Tuesday)
Regarding patch releases, no code freeze is planned.
-#### Tracking Issue for each Release
+### Tracking Issue for each Release
For each release, it is recommended to create an issue like [this one](https://github.com/vitessio/vitess/issues/10476) to track the current and past progress of a release.
It also allows us to document what happened during a release.
-### Pre-Release
+-----
+
+## Pre-Release
This step happens a few weeks before the actual release (whether it is an RC, GA or a patch release).
The main goal of this step is to make sure everything is ready to be released for the release day.
@@ -158,8 +56,9 @@ That includes:
> - All the Pull Requests that need to be in the release must be reviewed and merged before the code freeze.
> - The code freeze usually happens a few days before the release.
- **Making sure the people doing the release have access to all the tools and infrastructure needed to do the release.**
- > - This includes write access to the Vitess repository and to the Maven repository.
+ > - This includes write access to the Vitess repository and to the Maven repository.
- **Preparing and cleaning the release notes summary.**
+ > - If the release does not contain significant changes (i.e. a small patch release) then this step can be skipped
> - One or more Pull Requests have to be submitted in advance to create and update the release summary.
> - The summary files are located in: `./changelog/*.0/*.*.*/summary.md`.
> - The summary file for a release candidate is the same as the one for the GA release.
@@ -179,21 +78,19 @@ That includes:
> - While the Vitess Operator is located in a different repository, we also need to do a release for it.
> - The Operator follows the same cycle: RC1 -> GA -> Patches.
> - Documentation for the pre-release of the Vitess Operator is available [here](https://github.com/planetscale/vitess-operator/blob/main/docs/release-process.md#prepare-for-release).
-- **Update the release notes on `main`.**
- > - One Pull Request against `main` must be created, it will contain the new release notes that we are adding in the Release Pull Request.
- > - We open this Pull Request now to avoid waiting on the CI during release day.
- > - All future changes to the release notes during the code freeze will need to be ported to both PRs: the one on `main` and the Release Pull Request.
- **Update the website documentation.**
> - We want to open a preparatory **draft** Pull Request to update the documentation.
> - There are several pages we want to update:
- > - [The releases page](https://vitess.io/docs/releases/), we must add the new release to the list with all its information and link. The links can be broken (404 error) while we are preparing for the release, this is fine.
- > - [The local install page](https://vitess.io/docs/get-started/local/), we must use the proper version increment for this guide and the proper SHA. The SHA will have to be modified once the Release Pull Request and the release is tagged is merged.
+ > - [The releases page](https://vitess.io/docs/releases/): we must add the new release to the list with all its information and link. The links can be broken (404 error) while we are preparing for the release, this is fine.
+  >   - [The local install page](https://vitess.io/docs/get-started/local/): we must use the proper version increment for this guide and the proper SHA. The SHA will have to be modified once the Release Pull Request is merged and the release is tagged.
> - If we are doing a GA or RC release follow the instructions below:
> - There are two scripts in the website repository in `./tools/{ga|rc}_release.sh`, use them to update the website documentation. The scripts automate:
- > - For an RC, we need to create a new version in the sidebar and mark the current version as RC.
- > - For a GA, we need to mark the version we are releasing as "Stable" and the next one as "Development".
+ > - For an RC, we need to create a new entry in the sidebar which represents the next version on `main` and mark the version we are releasing as RC.
+ > - For a GA, we need to mark the version we are releasing as "Stable" and the next one as "Development".
-### Release
+-----
+
+## Release
On the release day, there are several things to do:
@@ -202,7 +99,7 @@ On the release day, there are several things to do:
- **Tag the Vitess release.**
> - A guide on how to tag a version is available in the [How To Release Vitess](#how-to-release-vitess) section.
- **Update the release notes on `main`.**
- > - During the code freeze, we created a Pull Request against `main` to update the release notes. It must be merged.
+ > - One Pull Request against `main` must be created, it will contain the new release notes that we are adding in the Release Pull Request.
- **Create the corresponding Vitess operator release.**
> - Applies only to versions greater or equal to `v14.0.0`.
> - If we are doing an RC release, then we will need to create the Vitess Operator RC too. If we are doing a GA release, we're also doing a GA release in the Operator.
@@ -224,18 +121,22 @@ On the release day, there are several things to do:
> - This step is even more important for GA releases as we often include a link to _arewefastyet_ in the blog post.
> - The benchmarks need to complete before announcing the blog posts or before they get cross-posted.
- **Go back to dev mode on the release branch.**
- > - The version constants across the codebase must be updated to `SNAPSHOT`.
-- **Build k8s Docker images and publish them**
+ > - The version constants across the codebase must be updated to `SNAPSHOT`.
+- **Build k8s Docker images and publish them.**
> - The docker image for `base`, `lite`, etc are built automatically by DockerHub. The k8s images however are dependent on these images and are required to be built manually.
> - These images should be built after the `base` image has been built and available on DockerHub.
- > - To build and publish these images, run `./release.sh` from the directory `vitess/docker`.
+ > - To build and publish these images, checkout the new release tag that was just created and run `./release.sh` from the directory `./docker`.
-### Post-Release
+-----
+
+## Post-Release
Once the release is over, we need to announce it on both Slack and Twitter. We also want to make sure the blog post was cross-posted, if applicable.
We need to verify that _arewefastyet_ has finished the benchmark too.
-### How to prepare the release of Vitess
+-----
+
+## How to prepare the release of Vitess
> In this example our current version is `v14.0.3` and we release the version `v15.0.0`.
> Alongside Vitess' release, we also release a new version of the operator.
@@ -254,8 +155,8 @@ We need to verify that _arewefastyet_ has finished the benchmark too.
```
2. Creation of the Release Pull Request.
- > This step will create the Release Pull Request that will then be reviewed ahead of the release day.
- > The merge commit of that Pull Request will be used during the release day to tag the release.
+ > This step will create the Release Pull Request that will then be reviewed ahead of the release day.
+ > The merge commit of that Pull Request will be used during the release day to tag the release.
1. Run the `create_release` script using the Makefile:
1. Release Candidate:
```shell
@@ -268,18 +169,15 @@ We need to verify that _arewefastyet_ has finished the benchmark too.
The script will prompt you `Pausing so release notes can be added. Press enter to continue`. We are now going to generate the release notes, continue to the next sub-step.
- 2. Run the following command to generate the release notes:
- 1. Release Candidate:
- ```shell
- go run ./go/tools/release-notes --from "v14.0.3" --to "HEAD" --version "v15.0.0-rc1" --summary "./changelog/15.0/15.0.0/summary.md" [--threads=[0-9.]]
- ```
- 2. General Availability:
- ```shell
- go run ./go/tools/release-notes --from "v14.0.3" --to "HEAD" --version "v15.0.0" --summary "./changelog/15.0/15.0.0/summary.md" [--threads=[0-9.]]
- ```
-
- > Important note: The release note generation fetches a lot of data from the GitHub API. You might reach the API request limit.
- In which case you should use the `--threads=` flag and set an integer value lower than 10 (the default).
+    2. Run the following command to generate the release notes. Note that you can omit the `--summary` flag if there is no summary.
+ ```shell
+ go run ./go/tools/release-notes --version "v15.0.0" --summary "./changelog/15.0/15.0.0/summary.md"
+ ```
+
+ > Make sure to also run `go run ./go/tools/releases/releases.go` to update the `./changelog` directory.
+
+ > Important note: The release note generation fetches a lot of data from the GitHub API. You might reach the API request limit.
+ In which case you should use the `--threads=` flag and set an integer value lower than 10 (the default).
This command will generate the release notes by looking at all the commits between the tag `v14.0.3` and the reference `HEAD`.
It will also use the file located in `./changelog/15.0/15.0.0/summary.md` to prefix the release notes with a text that the maintainers wrote before the release.
@@ -290,25 +188,27 @@ We need to verify that _arewefastyet_ has finished the benchmark too.
4. If we are doing an RC release it means we created a new branch from `main`. We need to update `main` with the next SNAPSHOT version. If `main` was on `15.0.0-SNAPSHOT`, we need to update it to `16.0.0-SNAPSHOT`. A simple find and replace in the IDE is sufficient, there only a handful of files that must be changed: `version.go` and several java files.
-### How To Release Vitess
+-----
+
+## How To Release Vitess
This section is divided into two parts:
- [Creation of the tags and release notes](#creation-of-the-tags-and-release-notes).
- [Creating Release or Release Candidate on the GitHub UI](#creating-release-or-release-candidate-on-the-github-ui)
-#### Creation of the tags and release notes
+### Creation of the tags and release notes
> This step implies that you have created a [Release Pull Request](#how-to-prepare-the-release-of-vitess) beforehand and that it has been reviewed.
> The merge commit of this Release Pull Request will be used to tag the release.
->
+>
> In this example our current version is `v14.0.3` and we release the version `v15.0.0`.
> Alongside Vitess' release, we also release a new version of the operator.
> Since we are releasing a release candidate here, the new version of the operator will also be a release candidate.
> In this example, the new operator version is `2.8.0`.
->
+>
> It is important to note that before the RC, there is a code freeze during which we create the release branch.
>
> The release branch in this example is `release-15.0`.
->
+>
> The example also assumes that `origin` is the `vitessio/vitess` remote.
1. Fetch `github.com/vitessio/vitess`'s remote.
@@ -330,26 +230,26 @@ This section is divided into two parts:
make BASE_BRANCH="release-15.0" BASE_REMOTE="origin" RELEASE_VERSION="15.0.0-rc1" DEV_VERSION="15.0.0-SNAPSHOT" back_to_dev_mode
```
> You will then need to follow the instructions given by the output of the back_to_dev_mode Makefile command. You will need to push the newly created branch and open a Pull Request.
-
+
6. Release the tag on GitHub UI as explained in the following section.
-#### Creating Release or Release Candidate on the GitHub UI
+### Creating Release or Release Candidate on the GitHub UI
> In the below steps, we use `v8.0.0` and `v9.0.0` as an example.
-##### 1. Open the releases page
+#### 1. Open the releases page
On Vitess' GitHub repository main page, click on Code -> [Releases](https://github.com/vitessio/vitess/releases).
![alt text](.images/release-01.png)
-##### 2. Draft a new release
+#### 2. Draft a new release
On the Releases page, click on `Draft a new release`.
![alt text](.images/release-02.png)
-##### 3. Tag a new release
+#### 3. Tag a new release
When drafting a new release, we are asked to choose the release's tag and branch.
We format the tag this way: `v9.0.0`. We append `-rcN` to the tag name for release candidates,
@@ -357,7 +257,7 @@ with `N` being the increment of the release candidate.
![alt text](.images/release-03.png)
-##### 4. Add release notes and release
+#### 4. Add release notes and release
Copy/paste the previously built Release Notes into the description of the release.
@@ -367,7 +267,9 @@ And finally, click on `Publish release`.
![alt text](.images/release-04.png)
-### How To Code Freeze
+-----
+
+## How To Code Freeze
In this example we are going to do a code freeze on the `release-15.0` branch. If we are doing a release candidate, there won't be a branch yet, hence we need to create it.
@@ -397,10 +299,12 @@ The script will prompt the command that will allow you to push the code freeze c
Remember, you should also disable the Launchable integration from the newly created release branch.
-### How To Merge During Code Freeze
+-----
+
+## How To Merge During Code Freeze
> **Warning:** It is not advised to merge a PR during code-freeze. If it is deemed absolutely necessary, then the following steps can be followed.
-
+
The PR that needs to be merged will be failing on the `Code Freeze` CI. To merge this PR, we'll have to mark this CI action as not required.
You will need administrator privileges on the vitess repository to be able to make this change.
@@ -411,15 +315,17 @@ You will need administrator privileges on the vitess repository to be able to ma
5. Within this list find `Code Freeze` and click on the cross next to it to remove it from this list.
6. Save your changes on the bottom of the page.
7. Refresh the page of the PR, and you should be able to merge it.
-8. After merging the PR, you need to do 2 more things -
- 1. Add `Code Freeze` back as a required check.
- 2. Check if the release PR has any merge conflicts. If it does, fix them and push.
+8. After merging the PR, you need to do 2 more things -
+ 1. Add `Code Freeze` back as a required check.
+ 2. Check if the release PR has any merge conflicts. If it does, fix them and push.
-### Java Packages: Deploy & Release
+-----
+
+## Java Packages: Deploy & Release
> **Warning:** This section's steps need to be executed only when releasing a new major version of Vitess,
> or if the Java packages changed from one minor/patch version to another.
->
+>
> For this example, we assume we juste released `v12.0.0`.
1. Checkout to the release commit.
@@ -443,7 +349,7 @@ You will need administrator privileges on the vitess repository to be able to ma
4. Deploy (upload) the Java code to the oss.sonatype.org repository:
- > **Warning:** After the deployment, the Java packages will be automatically released. Once released, you cannot delete them. The only option is to upload a newer version (e.g. increment the patch level).
+ > **Warning:** After the deployment, the Java packages will be automatically released. Once released, you cannot delete them. The only option is to upload a newer version (e.g. increment the patch level).
```bash
cd ./java/
diff --git a/doc/internal/release/java-packages.md b/doc/internal/release/java-packages.md
new file mode 100644
index 00000000000..3b3d2a38472
--- /dev/null
+++ b/doc/internal/release/java-packages.md
@@ -0,0 +1,27 @@
+# Java Packages
+
+We publish binary packages for our [JDBC driver and Java client on Maven Central](https://search.maven.org/#search|ga|1|g:"io.vitess").
+
+To do so, we use the http://oss.sonatype.org/ repository.
+New packages must be uploaded there ("deployed") and will be automatically published ("released").
+Once they are released there, they will be automatically synchronized with Maven Central.
+The synchronization takes only several minutes, but the update on http://search.maven.org may take up to two hours.
+
+## Access to oss.sonatype.org
+
+[Sign up here.](https://issues.sonatype.org/secure/Signup!default.jspa)
+Then you must be added as member to our `io.vitess` namespace.
+To do so, file a JIRA ticket with Sonatype to get added ([example for a different namespace](https://issues.sonatype.org/browse/OSSRH-30604)).
+
+## One-time setup
+
+### Set up GPG
+
+Follow [Sonatype's GPG instructions](https://central.sonatype.org/pages/working-with-pgp-signatures.html).
+
+Install `gpg-agent` (needed below) e.g. on Ubuntu via: `sudo apt-get install gnupg-agent`.
+On macOS, install `gnupg` via `brew install gnupg`.
+
+### Login configuration
+
+Create the `settings.xml` in the `$HOME/.m2/` directory as described in their [instructions](https://central.sonatype.org/pages/apache-maven.html).
diff --git a/doc/internal/release/release-branches.md b/doc/internal/release/release-branches.md
new file mode 100644
index 00000000000..876ec9070d3
--- /dev/null
+++ b/doc/internal/release/release-branches.md
@@ -0,0 +1,17 @@
+# Release Branches
+
+Each major and minor release (X.Y) should have a [release branch](https://github.com/vitessio/vitess/branches/all?query=release) named
+`release-X.Y`. This branch should diverge from `main` when the release
+is declared, after which point only bugfix PRs should be cherry-picked onto the branch.
+All other activity on `main` will go out with a subsequent major or minor release.
+
+```shell
+git checkout main
+git pull --ff-only upstream main
+
+git checkout -b release-X.Y
+git push upstream release-X.Y
+```
+
+The branches are named `release-X.Y` to distinguish them from point-in-time
+tags, which are named `vX.Y.Z`.
\ No newline at end of file
diff --git a/doc/internal/release/release-tags.md b/doc/internal/release/release-tags.md
new file mode 100644
index 00000000000..4136df1bbb9
--- /dev/null
+++ b/doc/internal/release/release-tags.md
@@ -0,0 +1,6 @@
+# Release Tags
+
+While the release branch is a moving target, release tags mark point-in-time
+snapshots of the repository. Essentially, a tag assigns a human-readable name to
+a specific Git commit hash. Although it's technically possible to reassign a tag
+name to a different hash, we must never do this.
\ No newline at end of file
diff --git a/doc/internal/release/versioning.md b/doc/internal/release/versioning.md
new file mode 100644
index 00000000000..b760e32d1b5
--- /dev/null
+++ b/doc/internal/release/versioning.md
@@ -0,0 +1,36 @@
+# Versioning
+
+Our versioning strategy is based on [VEP5](https://github.com/vitessio/enhancements/blob/main/veps/vep-5.md).
+
+## Major Release (vX)
+
+A new major release is needed when the public API changes in a
+backward-incompatible way -- for example, when removing deprecated interfaces.
+
+Our public API includes (but is not limited to):
+
+* The VTGate [RPC interfaces](https://github.com/vitessio/vitess/tree/main/proto).
+* The interfaces exposed by the VTGate client library in each language.
+
+Care must also be taken when changing the format of any data stored by a live
+system, such as topology data or Vitess-internal tables (used for sequences,
+distributed transactions, etc.). Although this data is considered as internal to
+Vitess, if any change breaks the upgrade path for a live system (for example,
+requiring that it be shut down and reinitialized from scratch), then it must be
+considered as a breaking change.
+
+## Minor Release (vX.Y)
+
+A new minor release indicates that functionality has been added or changed in a
+backward-compatible way. This should be the majority of normal releases.
+
+## Patch Release (vX.Y.Z)
+
+A patch release indicates that only a select set of bugfixes have been
+cherry-picked onto the associated minor release. The expectation is that
+upgrading by a patch release should be painless (not requiring any config
+changes) and safe (isolated from active development on `main`).
+
+## Pre-Release Labels (vX.Y.Z-labelN)
+
+Pre-release versions should be labeled with a suffix like `-beta2` or `-rc1`.
\ No newline at end of file
diff --git a/doc/vtadmin/clusters.yaml b/doc/vtadmin/clusters.yaml
index 55779df60b2..e4ed5335cc6 100644
--- a/doc/vtadmin/clusters.yaml
+++ b/doc/vtadmin/clusters.yaml
@@ -40,7 +40,16 @@ defaults:
vtsql-discovery-tags: "tag1,tag2"
# Username to send queries on behalf of. See package callerid.
vtsql-effective-user: "my-effective-user"
-
+ # Username used to make requests against vtgates in the cluster. Can be used with
+ # vtsql-credentials-password in place of vtsql-credentials-path-tmpl.
+ # If both vtsql-credentials-username and vtsql-credentials-path-tmpl are
+ # provided, vtsql-credentials-username takes precedence over username from vtsql-credentials-path-tmpl.
+ vtsql-credentials-username: "my-username"
+ # Password used to make requests against vtgates in the cluster. Used with
+ # vtsql-credentials-username in place of vtsql-credentials-path-tmpl.
+ # If both vtsql-credentials-password and vtsql-credentials-path-tmpl are
+ # provided, vtsql-credentials-password takes precedence over password from vtsql-credentials-path-tmpl.
+ vtsql-credentials-password: "my-password"
# VTAdmin also provides different RPC pools to gate the number of concurrent
# requests it will make against vtctlds/vtgates in a given cluster, to prevent
# overwhelming those components.
diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile
index 50aedd982d5..bacf76209a5 100644
--- a/docker/base/Dockerfile
+++ b/docker/base/Dockerfile
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}"
diff --git a/docker/base/Dockerfile.mysql57 b/docker/base/Dockerfile.mysql57
index 32f50c246bb..f36eac096da 100644
--- a/docker/base/Dockerfile.mysql57
+++ b/docker/base/Dockerfile.mysql57
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}"
diff --git a/docker/base/Dockerfile.percona57 b/docker/base/Dockerfile.percona57
index 0e1ae2567ab..c4abe42f959 100644
--- a/docker/base/Dockerfile.percona57
+++ b/docker/base/Dockerfile.percona57
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-percona57"
FROM "${image}"
diff --git a/docker/base/Dockerfile.percona80 b/docker/base/Dockerfile.percona80
index b3e27d379eb..7e24001895a 100644
--- a/docker/base/Dockerfile.percona80
+++ b/docker/base/Dockerfile.percona80
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-percona80"
FROM "${image}"
diff --git a/docker/bootstrap/CHANGELOG.md b/docker/bootstrap/CHANGELOG.md
index 455406f07ae..19b84857a08 100644
--- a/docker/bootstrap/CHANGELOG.md
+++ b/docker/bootstrap/CHANGELOG.md
@@ -72,4 +72,28 @@ List of changes between bootstrap image versions.
## [18.0] - 2023-06-07
### Changes
-- Update build to golang 1.20.5
\ No newline at end of file
+- Update build to golang 1.20.5
+
+## [19] - 2023-06-07
+### Changes
+- Update build to golang 1.20.5
+
+## [20] - 2023-08-03
+### Changes
+- Bump all images to bullseye base image
+
+## [21] - 2023-08-25
+### Changes
+- Update build to golang 1.21.0
+
+## [22] - 2023-09-07
+### Changes
+- Update build to golang 1.21.1
+
+## [22.0] - 2023-10-05
+### Changes
+- Update build to golang 1.21.2
+
+## [22.1] - 2023-10-10
+### Changes
+- Update build to golang 1.21.3
diff --git a/docker/bootstrap/Dockerfile.common b/docker/bootstrap/Dockerfile.common
index 5c6e4da5930..39b0c16566a 100644
--- a/docker/bootstrap/Dockerfile.common
+++ b/docker/bootstrap/Dockerfile.common
@@ -1,4 +1,4 @@
-FROM --platform=linux/amd64 golang:1.20.5-buster
+FROM --platform=linux/amd64 golang:1.21.3-bullseye
# Install Vitess build dependencies
RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
@@ -22,7 +22,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-ins
ENV VTROOT /vt/src/vitess.io/vitess
ENV VTDATAROOT /vt/vtdataroot
ENV VTPORTSTART 15000
-ENV PATH $VTROOT/bin:$VTROOT/dist/maven/bin:$VTROOT/dist/chromedriver:$PATH
+ENV PATH $VTROOT/bin:$VTROOT/dist/maven/bin:$PATH
ENV USER vitess
# Copy files needed for bootstrap
diff --git a/docker/bootstrap/Dockerfile.mysql57-arm64v8 b/docker/bootstrap/Dockerfile.mysql57-arm64v8
deleted file mode 100644
index 96b08413aa1..00000000000
--- a/docker/bootstrap/Dockerfile.mysql57-arm64v8
+++ /dev/null
@@ -1,65 +0,0 @@
-FROM debian:9 AS builder
-
-WORKDIR /opt
-#Build xtrabackup
-RUN apt-get update && \
- DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
- autoconf \
- automake \
- bison \
- build-essential \
- bzr \
- ca-certificates \
- cmake \
- flex \
- libaio-dev \
- libcurl4-gnutls-dev \
- libev-dev \
- libgcrypt11-dev \
- libncurses-dev \
- libtool \
- mysql-client \
- vim-common \
- wget \
- zlib1g-dev && \
- wget https://github.com/percona/percona-xtrabackup/archive/percona-xtrabackup-2.4.13.tar.gz \
- -P /opt && \
- tar zxf /opt/percona-xtrabackup-2.4.13.tar.gz -C /opt && \
- rm /opt/percona-xtrabackup-2.4.13.tar.gz && \
- cd /opt/percona-xtrabackup-percona-xtrabackup-2.4.13 && \
- mkdir bld && cd bld && \
- cmake .. -DBUILD_CONFIG=xtrabackup_release -DWITH_MAN_PAGES=OFF \
- -DDOWNLOAD_BOOST=1 -DWITH_BOOST=/usr/local && \
- make -j4 && \
- make install
-
-ARG bootstrap_version
-ARG image="vitess/bootstrap:${bootstrap_version}-common"
-
-FROM --platform=linux/arm64/v8 "${image}"
-
-# Install MySQL 5.7
-RUN add-apt-repository 'deb http://ftp.debian.org/debian sid main' && \
- apt-get update && \
- DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
- libmysqlclient-dev \
- mysql-client-5.7 \
- mysql-server-5.7 \
- libdbd-mysql-perl \
- python3-distutils-extra \
- rsync \
- libev4 \
- libcurl4-openssl-dev \
- libaio1 && \
- rm -rf /var/lib/apt/lists/* && \
- mkdir -p /usr/local/xtrabackup/bin && \
- mkdir -p /usr/local/xtrabackup/lib
-
-# Bootstrap Vitess
-WORKDIR /vt/src/vitess.io/vitess
-COPY --from=builder /usr/local/xtrabackup/bin /usr/local/xtrabackup/bin
-COPY --from=builder /usr/local/xtrabackup/lib /usr/local/xtrabackup/lib
-ENV PATH="/usr/local/xtrabackup/bin:${PATH}"
-ENV MYSQL_FLAVOR MySQL56
-USER vitess
-RUN ./bootstrap.sh
diff --git a/docker/bootstrap/Dockerfile.mysql80 b/docker/bootstrap/Dockerfile.mysql80
index e064c638d99..059f01b8101 100644
--- a/docker/bootstrap/Dockerfile.mysql80
+++ b/docker/bootstrap/Dockerfile.mysql80
@@ -6,9 +6,9 @@ FROM --platform=linux/amd64 "${image}"
# Install MySQL 8.0
RUN for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 8C718D3B5072E1F5 && break; done && \
for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 467B942D3A79BD29 && break; done && \
- add-apt-repository 'deb http://repo.mysql.com/apt/debian/ buster mysql-8.0' && \
+ add-apt-repository 'deb http://repo.mysql.com/apt/debian/ bullseye mysql-8.0' && \
for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \
- echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list && \
+ echo 'deb http://repo.percona.com/apt bullseye main' > /etc/apt/sources.list.d/percona.list && \
{ \
echo debconf debconf/frontend select Noninteractive; \
echo percona-server-server-8.0 percona-server-server/root_password password 'unused'; \
diff --git a/docker/bootstrap/Dockerfile.percona57 b/docker/bootstrap/Dockerfile.percona57
index 2d8beb5e95d..febe09fd8bf 100644
--- a/docker/bootstrap/Dockerfile.percona57
+++ b/docker/bootstrap/Dockerfile.percona57
@@ -5,16 +5,15 @@ FROM --platform=linux/amd64 "${image}"
# Install Percona 5.7
RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \
- add-apt-repository 'deb http://repo.percona.com/apt buster main' && \
+ add-apt-repository 'deb http://repo.percona.com/apt bullseye main' && \
{ \
echo debconf debconf/frontend select Noninteractive; \
echo percona-server-server-5.7 percona-server-server/root_password password 'unused'; \
echo percona-server-server-5.7 percona-server-server/root_password_again password 'unused'; \
} | debconf-set-selections && \
apt-get update && \
- apt-get install -y --no-install-recommends \
- percona-server-server-5.7 \
- libperconaserverclient20-dev percona-xtrabackup-24 && \
+ apt-get install -y --no-install-recommends percona-server-server-5.7 && \
+ apt-get install -y --no-install-recommends libperconaserverclient20-dev percona-xtrabackup-24 && \
rm -rf /var/lib/apt/lists/*
# Bootstrap Vitess
diff --git a/docker/bootstrap/Dockerfile.percona80 b/docker/bootstrap/Dockerfile.percona80
index 5dadc32cd0a..446ec554612 100644
--- a/docker/bootstrap/Dockerfile.percona80
+++ b/docker/bootstrap/Dockerfile.percona80
@@ -5,7 +5,7 @@ FROM --platform=linux/amd64 "${image}"
# Install Percona 8.0
RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done \
- && echo 'deb http://repo.percona.com/ps-80/apt buster main' > /etc/apt/sources.list.d/percona.list && \
+ && echo 'deb http://repo.percona.com/ps-80/apt bullseye main' > /etc/apt/sources.list.d/percona.list && \
{ \
echo debconf debconf/frontend select Noninteractive; \
echo percona-server-server-8.0 percona-server-server/root_password password 'unused'; \
@@ -21,7 +21,7 @@ RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.c
rsync \
libev4 \
# && rm -f /etc/apt/sources.list.d/percona.list \
- && echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list \
+ && echo 'deb http://repo.percona.com/apt bullseye main' > /etc/apt/sources.list.d/percona.list \
# { \
# echo debconf debconf/frontend select Noninteractive; \
# echo percona-server-server-8.0 percona-server-server/root_password password 'unused'; \
diff --git a/docker/bootstrap/build.sh b/docker/bootstrap/build.sh
index a3ac24d916d..d84e37fced9 100755
--- a/docker/bootstrap/build.sh
+++ b/docker/bootstrap/build.sh
@@ -47,11 +47,9 @@ fi
chmod -R o=rx *;
arch=$(uname -m)
-[ "$arch" == "aarch64" ] && [ $flavor != "common" ] && arch_ext='-arm64v8'
-
base_image="${base_image:-vitess/bootstrap:$version-common}"
-image="${image:-vitess/bootstrap:$version-$flavor$arch_ext}"
+image="${image:-vitess/bootstrap:$version-$flavor}"
while [ $# -gt 0 ]; do
if [[ $1 == *"--"* ]]; then
@@ -61,6 +59,11 @@ while [ $# -gt 0 ]; do
shift
done
-if [ -f "docker/bootstrap/Dockerfile.$flavor$arch_ext" ]; then
- docker build --no-cache -f docker/bootstrap/Dockerfile.$flavor$arch_ext -t $image --build-arg bootstrap_version=$version --build-arg image=$base_image .
+if [ -f "docker/bootstrap/Dockerfile.$flavor" ]; then
+ docker build \
+ -f docker/bootstrap/Dockerfile.$flavor \
+ -t $image \
+ --build-arg bootstrap_version=$version \
+ --build-arg image=$base_image \
+ .
fi
diff --git a/docker/k8s/Dockerfile b/docker/k8s/Dockerfile
index 30ff33952bc..3ba46595a83 100644
--- a/docker/k8s/Dockerfile
+++ b/docker/k8s/Dockerfile
@@ -19,14 +19,6 @@ FROM vitess/base:${VT_BASE_VER} AS base
FROM debian:${DEBIAN_VER}
-# TODO: remove when https://github.com/vitessio/vitess/issues/3553 is fixed
-RUN apt-get update && \
- apt-get upgrade -qq && \
- apt-get install default-mysql-client -qq --no-install-recommends && \
- apt-get autoremove && \
- apt-get clean && \
- rm -rf /var/lib/apt/lists/*
-
# Set up Vitess environment (just enough to run pre-built Go binaries)
ENV VTROOT /vt/src/vitess.io/vitess
ENV VTDATAROOT /vtdataroot
@@ -51,6 +43,7 @@ COPY --from=base /vt/bin/vtgate /vt/bin/
COPY --from=base /vt/bin/vttablet /vt/bin/
COPY --from=base /vt/bin/vtbackup /vt/bin/
COPY --from=base /vt/bin/vtadmin /vt/bin/
+COPY --from=base /vt/bin/vtorc /vt/bin/
# copy web admin files
COPY --from=base $VTROOT/web /vt/web/
diff --git a/docker/k8s/orchestrator/Dockerfile b/docker/k8s/orchestrator/Dockerfile
deleted file mode 100644
index e3e8f3ac346..00000000000
--- a/docker/k8s/orchestrator/Dockerfile
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-ARG VT_BASE_VER=latest
-ARG DEBIAN_VER=stable-slim
-
-FROM vitess/k8s:${VT_BASE_VER} AS k8s
-
-FROM debian:${DEBIAN_VER}
-ARG ORC_VER='3.2.3'
-
-RUN apt-get update && \
- apt-get upgrade -qq && \
- apt-get install wget ca-certificates jq -qq --no-install-recommends && \
- wget https://github.com/openark/orchestrator/releases/download/v${ORC_VER}/orchestrator_${ORC_VER}_amd64.deb && \
- dpkg -i orchestrator_${ORC_VER}_amd64.deb && \
- rm orchestrator_${ORC_VER}_amd64.deb && \
- apt-get purge wget -qq && \
- apt-get autoremove -qq && \
- apt-get clean && \
- rm -rf /var/lib/apt/lists/*
-
-# Copy vtctlclient to be used to notify
-COPY --from=k8s /vt/bin/vtctlclient /usr/bin/
-
-WORKDIR /usr/local/orchestrator
-CMD ["./orchestrator", "--config=/conf/orchestrator.conf.json", "http"]
diff --git a/docker/k8s/vtadmin/Dockerfile b/docker/k8s/vtadmin/Dockerfile
index 837ac8a525a..f952681d3c9 100644
--- a/docker/k8s/vtadmin/Dockerfile
+++ b/docker/k8s/vtadmin/Dockerfile
@@ -17,7 +17,7 @@ ARG DEBIAN_VER=bullseye-slim
FROM vitess/k8s:${VT_BASE_VER} AS k8s
-FROM node:16-${DEBIAN_VER} as node
+FROM node:18-${DEBIAN_VER} as node
# Prepare directory structure.
RUN mkdir -p /vt/web
diff --git a/docker/k8s/pmm-client/Dockerfile b/docker/k8s/vtorc/Dockerfile
similarity index 56%
rename from docker/k8s/pmm-client/Dockerfile
rename to docker/k8s/vtorc/Dockerfile
index 732e2e0a2ee..b62b30ee676 100644
--- a/docker/k8s/pmm-client/Dockerfile
+++ b/docker/k8s/vtorc/Dockerfile
@@ -18,18 +18,21 @@ ARG DEBIAN_VER=stable-slim
FROM vitess/k8s:${VT_BASE_VER} AS k8s
FROM debian:${DEBIAN_VER}
-ARG PMM_CLIENT_VER='1.17.4'
-
-RUN apt-get update && \
- apt-get upgrade -qq && \
- apt-get install procps wget ca-certificates -qq --no-install-recommends && \
- wget https://www.percona.com/redir/downloads/pmm-client/${PMM_CLIENT_VER}/binary/debian/buster/x86_64/pmm-client_${PMM_CLIENT_VER}-1.buster_amd64.deb && \
- dpkg -i pmm-client_${PMM_CLIENT_VER}-1.buster_amd64.deb && \
- rm pmm-client_${PMM_CLIENT_VER}-1.buster_amd64.deb && \
- apt-get purge wget ca-certificates -qq && \
- apt-get autoremove -qq && \
- apt-get clean && \
- rm -rf /var/lib/apt/lists/*
-
-# Copy CA certs for https calls
+
+# Set up Vitess environment (just enough to run pre-built Go binaries)
+ENV VTROOT /vt
+
+# Prepare directory structure.
+RUN mkdir -p /vt/bin && mkdir -p /vtdataroot
+
+# Copy certs to allow https calls
COPY --from=k8s /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
+
+# Copy binaries
+COPY --from=k8s /vt/bin/vtorc /vt/bin/
+
+# add vitess user/group and add permissions
+RUN groupadd -r --gid 2000 vitess && \
+ useradd -r -g vitess --uid 1000 vitess && \
+ chown -R vitess:vitess /vt && \
+ chown -R vitess:vitess /vtdataroot
diff --git a/docker/k8s/vttablet/Dockerfile b/docker/k8s/vttablet/Dockerfile
index 95453a69771..dd504d7860d 100644
--- a/docker/k8s/vttablet/Dockerfile
+++ b/docker/k8s/vttablet/Dockerfile
@@ -19,14 +19,6 @@ FROM vitess/k8s:${VT_BASE_VER} AS k8s
FROM debian:${DEBIAN_VER}
-# TODO: remove when https://github.com/vitessio/vitess/issues/3553 is fixed
-RUN apt-get update && \
- apt-get upgrade -qq && \
- apt-get install wget default-mysql-client jq curl -qq --no-install-recommends && \
- apt-get autoremove && \
- apt-get clean && \
- rm -rf /var/lib/apt/lists/*
-
# Set up Vitess environment (just enough to run pre-built Go binaries)
ENV VTROOT /vt
ENV VTDATAROOT /vtdataroot
diff --git a/docker/lite/Dockerfile.mysql57 b/docker/lite/Dockerfile.mysql57
index 49fcf5ca3f6..c85be4df605 100644
--- a/docker/lite/Dockerfile.mysql57
+++ b/docker/lite/Dockerfile.mysql57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.mysql80 b/docker/lite/Dockerfile.mysql80
index 8d14065066a..831a19e5809 100644
--- a/docker/lite/Dockerfile.mysql80
+++ b/docker/lite/Dockerfile.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.percona57 b/docker/lite/Dockerfile.percona57
index f06bdc10a8c..324a2760753 100644
--- a/docker/lite/Dockerfile.percona57
+++ b/docker/lite/Dockerfile.percona57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-percona57"
FROM "${image}" AS builder
@@ -33,7 +33,7 @@ USER vitess
RUN make install PREFIX=/vt/install
# Start over and build the final image.
-FROM debian:buster-slim
+FROM debian:bullseye-slim
# Install dependencies
COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh
diff --git a/docker/lite/Dockerfile.percona80 b/docker/lite/Dockerfile.percona80
index 5490eb6e79a..e09a0c1dd9f 100644
--- a/docker/lite/Dockerfile.percona80
+++ b/docker/lite/Dockerfile.percona80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-percona80"
FROM "${image}" AS builder
@@ -33,7 +33,7 @@ USER vitess
RUN make install PREFIX=/vt/install
# Start over and build the final image.
-FROM debian:buster-slim
+FROM debian:bullseye-slim
# Install dependencies
COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh
diff --git a/docker/lite/Dockerfile.testing b/docker/lite/Dockerfile.testing
index 08a43d99a18..aa47c814f39 100644
--- a/docker/lite/Dockerfile.testing
+++ b/docker/lite/Dockerfile.testing
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
@@ -33,7 +33,7 @@ USER vitess
RUN make install-testing PREFIX=/vt/install
# Start over and build the final image.
-FROM debian:buster-slim
+FROM debian:bullseye-slim
# Install dependencies
COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh
diff --git a/docker/lite/Dockerfile.ubi7.mysql57 b/docker/lite/Dockerfile.ubi7.mysql57
index 5307ad98ed5..0f02b151217 100644
--- a/docker/lite/Dockerfile.ubi7.mysql57
+++ b/docker/lite/Dockerfile.ubi7.mysql57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
@@ -36,6 +36,7 @@ RUN make install PREFIX=/vt/install
FROM registry.access.redhat.com/ubi7/ubi:latest
# Install keys and dependencies
+RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022
RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& yum install -y --setopt=alwaysprompt=no gnupg \
&& ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) \
diff --git a/docker/lite/Dockerfile.ubi7.mysql80 b/docker/lite/Dockerfile.ubi7.mysql80
index c8fa02653bf..163be58c32b 100644
--- a/docker/lite/Dockerfile.ubi7.mysql80
+++ b/docker/lite/Dockerfile.ubi7.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
@@ -36,6 +36,7 @@ RUN make install PREFIX=/vt/install
FROM registry.access.redhat.com/ubi7/ubi:latest
# Install keys and dependencies
+RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022
RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& yum install -y --setopt=alwaysprompt=no gnupg \
&& ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) \
diff --git a/docker/lite/Dockerfile.ubi7.percona57 b/docker/lite/Dockerfile.ubi7.percona57
index b5dc4006b9d..5df1129c6f3 100644
--- a/docker/lite/Dockerfile.ubi7.percona57
+++ b/docker/lite/Dockerfile.ubi7.percona57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-percona57"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi7.percona80 b/docker/lite/Dockerfile.ubi7.percona80
index a8a91d117f0..c55a4e2cfdc 100644
--- a/docker/lite/Dockerfile.ubi7.percona80
+++ b/docker/lite/Dockerfile.ubi7.percona80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-percona80"
FROM "${image}" AS builder
diff --git a/docker/lite/Dockerfile.ubi8.arm64.mysql80 b/docker/lite/Dockerfile.ubi8.arm64.mysql80
index b074e6cc014..fc479c149f1 100644
--- a/docker/lite/Dockerfile.ubi8.arm64.mysql80
+++ b/docker/lite/Dockerfile.ubi8.arm64.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
@@ -36,6 +36,7 @@ RUN make cross-install PREFIX=/vt/install GOOS=linux GOARCH=arm64
FROM registry.access.redhat.com/ubi8/ubi:latest
# Install keys and dependencies
+RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022
RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& yum install -y --setopt=alwaysprompt=no gnupg \
&& ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 99DB70FAE1D7CE227FB6488205B555B38483C65D 3A79BD29 A4A9406876FCBD3C456770C88C718D3B5072E1F5 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 ) \
@@ -54,7 +55,7 @@ RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& rm -f /tmp/mysqlrepo.rpm /tmp/perconarepo.rpm
RUN echo H4sICIDAHmICA2ZvbwDVkDFLxEAQhfv9FVfY7o4RhCBsoXJcIXKHwUIOi7m5MVk2yS6zG0//vYlRULTU4rrHvOHN+2ZL5Q4TP6oeO7bX3Od1pcuFXlyNUzVZg7S2yTmmCwDsgzjuDSUyB5SDI2+QzOChcyJBEnwkPOPQZijNuTkrigKmsHUFJ1MeCjUQEqg61tQweVtM0vOrfXItj1eAM0H0DiR2erTgbnOrV5uVvlk+6M+Kinvctby3p0ptqRziHjOnnxz3s/FnKJcxVlkYu/+k4Zcs+AvM8n3+jWW8MBc2NO6FZILUMEsoYQ76UvWI/vAGB/SOZZsCAAA= | base64 -d | gzip -dc > /etc/yum.repos.d/CentOS-Base.repo \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs --enablerepo c8base --enablerepo c8updates --enablerepo c8extras libev numactl-libs sysstat strace \
- && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs https://download-ib01.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/g/gperftools-libs-2.7-9.el8.aarch64.rpm https://download-ib01.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/j/jemalloc-5.2.1-2.el8.aarch64.rpm https://download-ib01.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/l/libunwind-1.3.1-3.el8.aarch64.rpm
+ && yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs https://dl.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/g/gperftools-libs-2.7-9.el8.aarch64.rpm https://dl.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/j/jemalloc-5.2.1-2.el8.aarch64.rpm https://dl.fedoraproject.org/pub/epel/8/Everything/aarch64/Packages/l/libunwind-1.3.1-3.el8.aarch64.rpm
RUN yum update -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs \
&& yum install -y --setopt=alwaysprompt=no --setopt=tsflags=nodocs bzip2 ca-certificates gnupg libaio libcurl \
procps-ng rsync wget openssl hostname curl tzdata make \
diff --git a/docker/lite/Dockerfile.ubi8.mysql80 b/docker/lite/Dockerfile.ubi8.mysql80
index 74c4a026171..48c5aaaa086 100644
--- a/docker/lite/Dockerfile.ubi8.mysql80
+++ b/docker/lite/Dockerfile.ubi8.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
@@ -36,6 +36,7 @@ RUN make install PREFIX=/vt/install
FROM registry.access.redhat.com/ubi8/ubi:latest
# Install keys and dependencies
+RUN rpm --import https://repo.mysql.com/RPM-GPG-KEY-mysql-2022
RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \
&& yum install -y --setopt=alwaysprompt=no gnupg \
&& ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 99DB70FAE1D7CE227FB6488205B555B38483C65D 3A79BD29 A4A9406876FCBD3C456770C88C718D3B5072E1F5 94E279EB8D8F25B21810ADF121EA45AB2F86D6A1 ) \
diff --git a/docker/lite/install_dependencies.sh b/docker/lite/install_dependencies.sh
index 92f7ab67397..2175df5def3 100755
--- a/docker/lite/install_dependencies.sh
+++ b/docker/lite/install_dependencies.sh
@@ -84,23 +84,25 @@ mysql57)
;;
mysql80)
mysql8_version=8.0.30
- do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb /tmp/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb
- do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb
- do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-plugins_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client-plugins_${mysql8_version}-1debian10_amd64.deb
- do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client_${mysql8_version}-1debian10_amd64.deb
- do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-client_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-client_${mysql8_version}-1debian10_amd64.deb
- do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-server-core_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-server-core_${mysql8_version}-1debian10_amd64.deb
- do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-server_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-server_${mysql8_version}-1debian10_amd64.deb
- do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-server_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-server_${mysql8_version}-1debian10_amd64.deb
+ do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-common_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-common_${mysql8_version}-1debian11_amd64.deb
+ do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/libmysqlclient21_${mysql8_version}-1debian11_amd64.deb /tmp/libmysqlclient21_${mysql8_version}-1debian11_amd64.deb
+ do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-core_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-community-client-core_${mysql8_version}-1debian11_amd64.deb
+ do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-plugins_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-community-client-plugins_${mysql8_version}-1debian11_amd64.deb
+ do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-community-client_${mysql8_version}-1debian11_amd64.deb
+ do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-client_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-client_${mysql8_version}-1debian11_amd64.deb
+ do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-server-core_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-community-server-core_${mysql8_version}-1debian11_amd64.deb
+ do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-server_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-community-server_${mysql8_version}-1debian11_amd64.deb
+ do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-server_${mysql8_version}-1debian11_amd64.deb /tmp/mysql-server_${mysql8_version}-1debian11_amd64.deb
PACKAGES=(
- /tmp/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb
- /tmp/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb
- /tmp/mysql-community-client-plugins_${mysql8_version}-1debian10_amd64.deb
- /tmp/mysql-community-client_${mysql8_version}-1debian10_amd64.deb
- /tmp/mysql-client_${mysql8_version}-1debian10_amd64.deb
- /tmp/mysql-community-server-core_${mysql8_version}-1debian10_amd64.deb
- /tmp/mysql-community-server_${mysql8_version}-1debian10_amd64.deb
- /tmp/mysql-server_${mysql8_version}-1debian10_amd64.deb
+ /tmp/mysql-common_${mysql8_version}-1debian11_amd64.deb
+ /tmp/libmysqlclient21_${mysql8_version}-1debian11_amd64.deb
+ /tmp/mysql-community-client-core_${mysql8_version}-1debian11_amd64.deb
+ /tmp/mysql-community-client-plugins_${mysql8_version}-1debian11_amd64.deb
+ /tmp/mysql-community-client_${mysql8_version}-1debian11_amd64.deb
+ /tmp/mysql-client_${mysql8_version}-1debian11_amd64.deb
+ /tmp/mysql-community-server-core_${mysql8_version}-1debian11_amd64.deb
+ /tmp/mysql-community-server_${mysql8_version}-1debian11_amd64.deb
+ /tmp/mysql-server_${mysql8_version}-1debian11_amd64.deb
percona-xtrabackup-80
)
;;
@@ -146,18 +148,21 @@ mysql57)
echo 'deb http://repo.mysql.com/apt/debian/ buster mysql-5.7' > /etc/apt/sources.list.d/mysql.list
;;
mysql80)
- echo 'deb http://repo.mysql.com/apt/debian/ buster mysql-8.0' > /etc/apt/sources.list.d/mysql.list
+ echo 'deb http://repo.mysql.com/apt/debian/ bullseye mysql-8.0' > /etc/apt/sources.list.d/mysql.list
;;
esac
# Add extra apt repositories for Percona Server and/or Percona XtraBackup.
case "${FLAVOR}" in
-mysql57|mysql80|percona57)
+mysql57)
echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list
;;
+mysql80|percona57)
+ echo 'deb http://repo.percona.com/apt bullseye main' > /etc/apt/sources.list.d/percona.list
+ ;;
percona80)
- echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list
- echo 'deb http://repo.percona.com/ps-80/apt buster main' > /etc/apt/sources.list.d/percona80.list
+ echo 'deb http://repo.percona.com/apt bullseye main' > /etc/apt/sources.list.d/percona.list
+ echo 'deb http://repo.percona.com/ps-80/apt bullseye main' > /etc/apt/sources.list.d/percona80.list
;;
esac
diff --git a/docker/local/Dockerfile b/docker/local/Dockerfile
index 1d41173800b..9ef944cfd3b 100644
--- a/docker/local/Dockerfile
+++ b/docker/local/Dockerfile
@@ -1,4 +1,4 @@
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-common"
FROM "${image}"
@@ -37,6 +37,7 @@ COPY examples/local /vt/local
# Copy the vtadmin web app to the correct location and npm install
COPY --chown=vitess:vitess web /web
RUN npm install /web/vtadmin
+RUN /web/vtadmin/build.sh
RUN mkdir /vt/common
COPY examples/common /vt/common
diff --git a/docker/local/run.sh b/docker/local/run.sh
index 9ba5aa07906..16b07fc426c 100755
--- a/docker/local/run.sh
+++ b/docker/local/run.sh
@@ -1,3 +1,3 @@
#!/bin/bash
-docker run -p 14200:14200 -p 14201:14201 -p 15000:15000 -p 15001:15001 -p 15991:15991 -p 15999:15999 -p 16000:16000 --rm -it vitess/local
+docker run -d -p 14200:14200 -p 14201:14201 -p 15000:15000 -p 15001:15001 -p 15991:15991 -p 15999:15999 -p 16000:16000 --rm -it vitess/local
diff --git a/docker/mini/Dockerfile b/docker/mini/Dockerfile
index f9c14932eb0..469fbef8d9e 100644
--- a/docker/mini/Dockerfile
+++ b/docker/mini/Dockerfile
@@ -31,16 +31,12 @@ RUN ln -s /usr/bin/python3 /usr/bin/python
COPY docker/mini/install_mini_dependencies.sh /vt/dist/install_mini_dependencies.sh
RUN /vt/dist/install_mini_dependencies.sh
-COPY docker/mini/orchestrator-vitess-mini.conf.json /etc/orchestrator.conf.json
-RUN chown vitess:vitess /etc/orchestrator.conf.json
-
COPY docker/mini/docker-entry /vt/dist/docker/mini/docker-entry
COPY examples/common/scripts /vt/dist/scripts
COPY examples/common/env.sh /vt/dist/scripts/env.sh
COPY examples/common/lib/utils.sh /vt/dist/scripts/lib/utils.sh
COPY docker/mini/vtctld-mini-up.sh /vt/dist/scripts/vtctld-mini-up.sh
COPY docker/mini/vttablet-mini-up.sh /vt/dist/scripts/vttablet-mini-up.sh
-COPY docker/mini/orchestrator-up.sh /vt/dist/scripts/orchestrator-up.sh
RUN echo "hostname=127.0.0.1" >> /vt/dist/scripts/env.sh
RUN cat /vt/dist/scripts/env.sh | egrep "^alias" >> /etc/bash.bashrc
diff --git a/docker/mini/orchestrator-up.sh b/docker/mini/orchestrator-up.sh
deleted file mode 100755
index 6e4ff486fad..00000000000
--- a/docker/mini/orchestrator-up.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/bash
-
-source ./env.sh
-
-echo "- Configuring orchestrator with given topology server and credentials..."
-cp /etc/orchestrator.conf.json /tmp/
-sed -i /tmp/orchestrator.conf.json -e "s/DISCOVERY_SEED_PLACEHOLDER/$TOPOLOGY_SERVER/g"
-sed -i /tmp/orchestrator.conf.json -e "s/MYSQL_TOPOLOGY_USER_PLACEHOLDER/$TOPOLOGY_USER/g"
-sed -i /tmp/orchestrator.conf.json -e "s/MYSQL_TOPOLOGY_PASSWORD_PLACEHOLDER/$TOPOLOGY_PASSWORD/g"
-
-cat /tmp/orchestrator.conf.json > /etc/orchestrator.conf.json
-rm /tmp/orchestrator.conf.json
-
-ORCHESTRATOR_LOG="${VTDATAROOT}/tmp/orchestrator.out"
-
-echo "- Starting orchestrator... Logfile is $ORCHESTRATOR_LOG"
-
-cd /usr/local/orchestrator
-./orchestrator http > $ORCHESTRATOR_LOG 2>&1 &
diff --git a/docker/mini/orchestrator-vitess-mini.conf.json b/docker/mini/orchestrator-vitess-mini.conf.json
deleted file mode 100644
index 604801603c2..00000000000
--- a/docker/mini/orchestrator-vitess-mini.conf.json
+++ /dev/null
@@ -1,65 +0,0 @@
-{
- "Debug": true,
- "EnableSyslog": false,
- "ListenAddress": ":3000",
- "MySQLTopologyUser": "MYSQL_TOPOLOGY_USER_PLACEHOLDER",
- "MySQLTopologyPassword": "MYSQL_TOPOLOGY_PASSWORD_PLACEHOLDER",
- "BackendDB": "sqlite",
- "SQLite3DataFile": "/tmp/orchestrator.sqlite3",
- "MySQLConnectTimeoutSeconds": 1,
- "DefaultInstancePort": 3306,
- "DiscoverByShowSlaveHosts": true,
- "InstancePollSeconds": 1,
- "HostnameResolveMethod": "none",
- "MySQLHostnameResolveMethod": "@@report_host",
- "SkipBinlogServerUnresolveCheck": true,
- "ExpiryHostnameResolvesMinutes": 60,
- "VerifyReplicationFilters": false,
- "ReasonableMaintenanceReplicationLagSeconds": 20,
- "CandidateInstanceExpireMinutes": 60,
- "ReadOnly": false,
- "AuthenticationMethod": "",
- "ReplicationLagQuery": "",
- "DetectClusterAliasQuery": "",
- "DetectClusterDomainQuery": "",
- "DetectInstanceAliasQuery": "",
- "DetectPromotionRuleQuery": "",
- "DetectDataCenterQuery": "",
- "DetectRegionQuery": "",
- "DetectPhysicalEnvironmentQuery": "",
- "DetectSemiSyncEnforcedQuery": "",
- "DiscoverySeeds": [
- "DISCOVERY_SEED_PLACEHOLDER"
- ],
- "ServeAgentsHttp": false,
- "UseSSL": false,
- "UseMutualTLS": false,
- "MySQLTopologyUseMixedTLS": false,
- "StatusEndpoint": "/api/status",
- "StatusSimpleHealth": true,
- "StatusOUVerify": false,
- "BinlogEventsChunkSize": 10000,
- "SkipBinlogEventsContaining": [],
- "ReduceReplicationAnalysisCount": false,
- "FailureDetectionPeriodBlockMinutes": 5,
- "FailMasterPromotionOnLagMinutes": 0,
- "RecoveryPeriodBlockSeconds": 0,
- "RecoveryIgnoreHostnameFilters": [],
- "RecoverMasterClusterFilters": [],
- "RecoverIntermediateMasterClusterFilters": [],
- "OnFailureDetectionProcesses": [],
- "PreFailoverProcesses": [],
- "PostFailoverProcesses": [],
- "PostUnsuccessfulFailoverProcesses": [],
- "PostMasterFailoverProcesses": [],
- "PostIntermediateMasterFailoverProcesses": [],
- "CoMasterRecoveryMustPromoteOtherCoMaster": true,
- "DetachLostReplicasAfterMasterFailover": true,
- "ApplyMySQLPromotionAfterMasterFailover": true,
- "PreventCrossDataCenterMasterFailover": false,
- "PreventCrossRegionMasterFailover": true,
- "MasterFailoverDetachReplicaMasterHost": false,
- "MasterFailoverLostInstancesDowntimeMinutes": 0,
- "PostponeReplicaRecoveryOnLagMinutes": 0,
- "RaftEnabled": false
-}
diff --git a/docker/mini/vttablet-mini-up.sh b/docker/mini/vttablet-mini-up.sh
index 4cc86156076..37e74565763 100755
--- a/docker/mini/vttablet-mini-up.sh
+++ b/docker/mini/vttablet-mini-up.sh
@@ -64,7 +64,6 @@ vttablet \
-mycnf_mysql_port $mysql_port \
-service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream' \
-pid_file $VTDATAROOT/$tablet_dir/vttablet.pid \
- -vtctld_addr http://$hostname:$vtctld_web_port/ \
> $VTDATAROOT/$tablet_dir/vttablet.out 2>&1 &
# Block waiting for the tablet to be listening
diff --git a/docker/orchestrator/Dockerfile b/docker/orchestrator/Dockerfile
deleted file mode 100644
index 13622322443..00000000000
--- a/docker/orchestrator/Dockerfile
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-FROM debian:jessie
-
-# Install Percona XtraDB Cluster (Galera)
-RUN apt-key adv --keyserver keys.gnupg.net --recv-keys 9334A25F8507EFA5 && \
- echo 'deb http://repo.percona.com/apt jessie main' > /etc/apt/sources.list.d/mysql.list && \
- apt-get update && \
- DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
- percona-xtradb-cluster-server-5.6 && \
- rm -rf /var/lib/apt/lists/*
-
-# Set up Orchestrator database
-RUN service mysql start && \
- mysql -e "CREATE DATABASE orchestrator; GRANT ALL PRIVILEGES ON orchestrator.* TO 'orc_server_user'@'127.0.0.1' IDENTIFIED BY 'orc_server_user_password'" && \
- service mysql stop
-
-# Copy Orchestrator files (placed in workdir by build.sh)
-COPY vtctlclient /usr/bin/vtctlclient
-COPY orchestrator /usr/bin/orchestrator
-COPY orchestrator.conf.json /orc/conf/orchestrator.conf.json
-COPY resources /orc/resources
-
-WORKDIR /orc
-CMD ["/usr/bin/orchestrator", "http"]
-
diff --git a/docker/orchestrator/build.sh b/docker/orchestrator/build.sh
deleted file mode 100755
index 45236582a12..00000000000
--- a/docker/orchestrator/build.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -e
-
-tmpdir=`mktemp -d`
-
-script="go install vitess.io/vitess/go/cmd/vtctlclient@latest && \
- git clone https://github.com/openark/orchestrator.git src/github.com/openark/orchestrator && \
- go install github.com/openark/orchestrator/go/cmd/orchestrator"
-
-echo "Building orchestrator..."
-docker run -ti --name=vt_orc_build golang:1.14.4-buster bash -c "$script"
-docker cp vt_orc_build:/go/bin/orchestrator $tmpdir
-docker cp vt_orc_build:/go/bin/vtctlclient $tmpdir
-docker cp vt_orc_build:/go/src/github.com/openark/orchestrator/resources $tmpdir
-docker rm vt_orc_build
-
-echo "Building Docker image..."
-cp Dockerfile orchestrator.conf.json $tmpdir
-(cd $tmpdir && docker build -t vitess/orchestrator .)
-
-# Clean up
-rm -r $tmpdir
diff --git a/docker/orchestrator/orchestrator.conf.json b/docker/orchestrator/orchestrator.conf.json
deleted file mode 100644
index 729594044ed..00000000000
--- a/docker/orchestrator/orchestrator.conf.json
+++ /dev/null
@@ -1,114 +0,0 @@
-{
- "ActiveNodeExpireSeconds": 5,
- "ApplyMySQLPromotionAfterMasterFailover": true,
- "AuditLogFile": "/tmp/orchestrator-audit.log",
- "AuditToSyslog": false,
- "AuthenticationMethod": "",
- "AuthUserHeader": "",
- "BackendDB": "sqlite",
- "BinlogEventsChunkSize": 10000,
- "CandidateInstanceExpireMinutes": 60,
- "CoMasterRecoveryMustPromoteOtherCoMaster": false,
- "DataCenterPattern": "[.]([^.]+)[.][^.]+[.]vitess[.]io",
- "Debug": true,
- "DefaultInstancePort": 3306,
- "DetachLostSlavesAfterMasterFailover": true,
- "DetectClusterAliasQuery": "SELECT value FROM _vt.local_metadata WHERE name='ClusterAlias'",
- "DetectClusterDomainQuery": "",
- "DetectInstanceAliasQuery": "SELECT value FROM _vt.local_metadata WHERE name='Alias'",
- "DetectPromotionRuleQuery": "SELECT value FROM _vt.local_metadata WHERE name='PromotionRule'",
- "DetectDataCenterQuery": "SELECT value FROM _vt.local_metadata WHERE name='DataCenter'",
- "DetectSemiSyncEnforcedQuery": "SELECT @@global.rpl_semi_sync_master_wait_no_slave AND @@global.rpl_semi_sync_master_timeout > 1000000",
- "DiscoverByShowSlaveHosts": false,
- "EnableSyslog": false,
- "ExpiryHostnameResolvesMinutes": 60,
- "DelayMasterPromotionIfSQLThreadNotUpToDate": true,
- "FailureDetectionPeriodBlockMinutes": 10,
- "GraphiteAddr": "",
- "GraphiteConvertHostnameDotsToUnderscores": true,
- "GraphitePath": "",
- "HostnameResolveMethod": "none",
- "HTTPAuthPassword": "",
- "HTTPAuthUser": "",
- "InstanceBulkOperationsWaitTimeoutSeconds": 10,
- "InstancePollSeconds": 5,
- "ListenAddress": ":3000",
- "MasterFailoverLostInstancesDowntimeMinutes": 0,
- "MySQLConnectTimeoutSeconds": 1,
- "MySQLHostnameResolveMethod": "none",
- "MySQLTopologyCredentialsConfigFile": "",
- "MySQLTopologyMaxPoolConnections": 3,
- "MySQLTopologyPassword": "orc_client_user_password",
- "MySQLTopologyReadTimeoutSeconds": 3,
- "MySQLTopologySSLCAFile": "",
- "MySQLTopologySSLCertFile": "",
- "MySQLTopologySSLPrivateKeyFile": "",
- "MySQLTopologySSLSkipVerify": true,
- "MySQLTopologyUseMutualTLS": false,
- "MySQLTopologyUser": "orc_client_user",
- "OnFailureDetectionProcesses": [
- "echo 'Detected {failureType} on {failureCluster}. Affected replicas: {countSlaves}' >> /tmp/recovery.log"
- ],
- "OSCIgnoreHostnameFilters": [
- ],
- "PhysicalEnvironmentPattern": "[.]([^.]+[.][^.]+)[.]vitess[.]io",
- "PostFailoverProcesses": [
- "echo '(for all types) Recovered from {failureType} on {failureCluster}. Failed: {failedHost}:{failedPort}; Successor: {successorHost}:{successorPort}' >> /tmp/recovery.log"
- ],
- "PostIntermediateMasterFailoverProcesses": [
- "echo 'Recovered from {failureType} on {failureCluster}. Failed: {failedHost}:{failedPort}; Successor: {successorHost}:{successorPort}' >> /tmp/recovery.log"
- ],
- "PostMasterFailoverProcesses": [
- "echo 'Recovered from {failureType} on {failureCluster}. Failed: {failedHost}:{failedPort}; Promoted: {successorHost}:{successorPort}' >> /tmp/recovery.log",
- "n=0; until [ $n -ge 10 ]; do vtctlclient -server vtctld:15999 TabletExternallyReparented {successorAlias} && break; n=$[$n+1]; sleep 5; done"
- ],
- "PostponeSlaveRecoveryOnLagMinutes": 0,
- "PostUnsuccessfulFailoverProcesses": [
- ],
- "PowerAuthUsers": [
- "*"
- ],
- "PreFailoverProcesses": [
- "echo 'Will recover from {failureType} on {failureCluster}' >> /tmp/recovery.log"
- ],
- "ProblemIgnoreHostnameFilters": [
- ],
- "PromotionIgnoreHostnameFilters": [
- ],
- "ReadLongRunningQueries": false,
- "ReadOnly": false,
- "ReasonableMaintenanceReplicationLagSeconds": 20,
- "ReasonableReplicationLagSeconds": 10,
- "RecoverMasterClusterFilters": [
- ".*"
- ],
- "RecoveryIgnoreHostnameFilters": [
- ],
- "RecoveryPeriodBlockSeconds": 60,
- "ReduceReplicationAnalysisCount": true,
- "RejectHostnameResolvePattern": "",
- "RemoveTextFromHostnameDisplay": ".vitess.io:3306",
- "ReplicationLagQuery": "",
- "ServeAgentsHttp": false,
- "SkipBinlogEventsContaining": [
- ],
- "SkipBinlogServerUnresolveCheck": true,
- "SkipOrchestratorDatabaseUpdate": false,
- "SlaveStartPostWaitMilliseconds": 1000,
- "SnapshotTopologiesIntervalHours": 0,
- "SQLite3DataFile": ":memory:",
- "SSLCAFile": "",
- "SSLCertFile": "",
- "SSLPrivateKeyFile": "",
- "SSLSkipVerify": false,
- "SSLValidOUs": [
- ],
- "StaleSeedFailMinutes": 60,
- "StatusEndpoint": "/api/status",
- "StatusOUVerify": false,
- "UnseenAgentForgetHours": 6,
- "UnseenInstanceForgetHours": 240,
- "UseMutualTLS": false,
- "UseSSL": false,
- "VerifyReplicationFilters": false
-}
diff --git a/docker/release.sh b/docker/release.sh
index fe9b5333f76..1805421b5af 100755
--- a/docker/release.sh
+++ b/docker/release.sh
@@ -1,9 +1,9 @@
#!/bin/bash
set -ex
-vt_base_version='v17.0.2'
-debian_versions='buster bullseye'
-default_debian_version='bullseye'
+vt_base_version='v18.0.1-SNAPSHOT'
+debian_versions='bullseye bookworm'
+default_debian_version='bookworm'
docker pull --platform linux/amd64 vitess/base:$vt_base_version
@@ -21,6 +21,11 @@ do
docker push vitess/vtadmin:$vt_base_version-$debian_version
if [[ $debian_version == $default_debian_version ]]; then docker push vitess/vtadmin:$vt_base_version; fi
+ docker build --platform linux/amd64 --build-arg VT_BASE_VER=$vt_base_version --build-arg DEBIAN_VER=$debian_version-slim -t vitess/vtorc:$vt_base_version-$debian_version k8s/vtorc
+ docker tag vitess/vtorc:$vt_base_version-$debian_version vitess/vtorc:$vt_base_version
+ docker push vitess/vtorc:$vt_base_version-$debian_version
+ if [[ $debian_version == $default_debian_version ]]; then docker push vitess/vtorc:$vt_base_version; fi
+
docker build --platform linux/amd64 --build-arg VT_BASE_VER=$vt_base_version --build-arg DEBIAN_VER=$debian_version-slim -t vitess/vtgate:$vt_base_version-$debian_version k8s/vtgate
docker tag vitess/vtgate:$vt_base_version-$debian_version vitess/vtgate:$vt_base_version
docker push vitess/vtgate:$vt_base_version-$debian_version
diff --git a/docker/vttestserver/Dockerfile.mysql57 b/docker/vttestserver/Dockerfile.mysql57
index e2fc04eb11f..8f20d4a7855 100644
--- a/docker/vttestserver/Dockerfile.mysql57
+++ b/docker/vttestserver/Dockerfile.mysql57
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-mysql57"
FROM "${image}" AS builder
@@ -33,7 +33,7 @@ USER vitess
RUN make install-testing PREFIX=/vt/install
# Start over and build the final image.
-FROM debian:buster-slim
+FROM debian:bullseye-slim
# Install dependencies
COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh
diff --git a/docker/vttestserver/Dockerfile.mysql80 b/docker/vttestserver/Dockerfile.mysql80
index 8911a8f00ee..c1dd5eef4f2 100644
--- a/docker/vttestserver/Dockerfile.mysql80
+++ b/docker/vttestserver/Dockerfile.mysql80
@@ -17,7 +17,7 @@
# ensure images contain the right binaries.
# Use a temporary layer for the build stage.
-ARG bootstrap_version=18.0
+ARG bootstrap_version=22.1
ARG image="vitess/bootstrap:${bootstrap_version}-mysql80"
FROM "${image}" AS builder
@@ -33,7 +33,7 @@ USER vitess
RUN make install-testing PREFIX=/vt/install
# Start over and build the final image.
-FROM debian:buster-slim
+FROM debian:bullseye-slim
# Install dependencies
COPY docker/lite/install_dependencies.sh /vt/dist/install_dependencies.sh
diff --git a/examples/backups/restart_tablets.sh b/examples/backups/restart_tablets.sh
index bfafcf26d4f..de812a0ea8e 100755
--- a/examples/backups/restart_tablets.sh
+++ b/examples/backups/restart_tablets.sh
@@ -35,9 +35,9 @@ for i in 300 301 302; do
done
sleep 5
-# Wait for all the replica tablets to be in the serving state before initiating
-# InitShardPrimary. This is essential, since we want the RESTORE phase to be
-# complete before we start InitShardPrimary, otherwise we end up reading the
+# Wait for all the tablets to be in the serving state before initiating
+# PlannedReparentShard. This is essential, since we want the RESTORE phase to be
+# complete before we start PlannedReparentShard, otherwise we end up reading the
# tablet type to RESTORE and do not set semi-sync, which leads to the primary
# hanging on writes.
totalTime=600
@@ -50,6 +50,15 @@ for i in 101 201 301; do
done
done
+for i in 102 202 302; do
+ while [ $totalTime -gt 0 ]; do
+ status=$(curl "http://$hostname:15$i/debug/status_details")
+ echo "$status" | grep "RDONLY: Serving" && break
+ totalTime=$((totalTime-1))
+ sleep 0.1
+ done
+done
+
# Check that all the replica tablets have reached REPLICA: Serving state
for i in 101 201 301; do
status=$(curl "http://$hostname:15$i/debug/status_details")
@@ -57,6 +66,13 @@ for i in 101 201 301; do
echo "tablet-$i did not reach REPLICA: Serving state. Exiting due to failure."
exit 1
done
+# Check that all the rdonly tablets have reached RDONLY: Serving state
+for i in 102 202 302; do
+ status=$(curl "http://$hostname:15$i/debug/status_details")
+ echo "$status" | grep "RDONLY: Serving" && continue
+ echo "tablet-$i did not reach RDONLY: Serving state. Exiting due to failure."
+ exit 1
+done
vtctldclient PlannedReparentShard commerce/0 --new-primary "zone1-100"
vtctldclient PlannedReparentShard customer/-80 --new-primary "zone1-200"
diff --git a/examples/backups/start_cluster.sh b/examples/backups/start_cluster.sh
index 9855171ea4d..33cbb362d88 100755
--- a/examples/backups/start_cluster.sh
+++ b/examples/backups/start_cluster.sh
@@ -22,8 +22,6 @@ source ../common/env.sh
# start topo server
if [ "${TOPO}" = "zk2" ]; then
CELL=zone1 ../common/scripts/zk-up.sh
-elif [ "${TOPO}" = "k8s" ]; then
- CELL=zone1 ../common/scripts/k3s-up.sh
else
CELL=zone1 ../common/scripts/etcd-up.sh
fi
@@ -44,8 +42,8 @@ done
vtctldclient PlannedReparentShard commerce/0 --new-primary "zone1-100"
# create the schema for commerce
-vtctlclient ApplySchema -- --sql-file ./create_commerce_schema.sql commerce || fail "Could not apply schema for the commerce keyspace"
-vtctlclient ApplyVSchema -- --vschema_file ../local/vschema_commerce_seq.json commerce || fail "Could not apply vschema for the commerce keyspace"
+vtctldclient ApplySchema --sql-file ./create_commerce_schema.sql commerce || fail "Could not apply schema for the commerce keyspace"
+vtctldclient ApplyVSchema --vschema-file ../local/vschema_commerce_seq.json commerce || fail "Could not apply vschema for the commerce keyspace"
# Create keyspace and set the semi_sync durability policy.
vtctldclient CreateKeyspace --durability-policy=semi_sync customer || fail "Failed to create and configure the customer keyspace"
@@ -69,8 +67,8 @@ for shard in "-80" "80-"; do
done
# create the schema for customer
-vtctlclient ApplySchema -- --sql-file ./create_customer_schema.sql customer || fail "Could not apply schema for the customer keyspace"
-vtctlclient ApplyVSchema -- --vschema_file ../local/vschema_customer_sharded.json customer || fail "Could not apply vschema for the customer keyspace"
+vtctldclient ApplySchema --sql-file ./create_customer_schema.sql customer || fail "Could not apply schema for the customer keyspace"
+vtctldclient ApplyVSchema --vschema-file ../local/vschema_customer_sharded.json customer || fail "Could not apply vschema for the customer keyspace"
# start vtgate
diff --git a/examples/backups/stop_tablets.sh b/examples/backups/stop_tablets.sh
index 2a45e9e68d2..6a3ced6ab74 100755
--- a/examples/backups/stop_tablets.sh
+++ b/examples/backups/stop_tablets.sh
@@ -20,7 +20,7 @@
source ../common/env.sh
for tablet in 100 200 300; do
- if vtctlclient --action_timeout 1s --server localhost:15999 GetTablet zone1-$tablet >/dev/null 2>&1; then
+ if vtctldclient --action_timeout 1s --server localhost:15999 GetTablet zone1-$tablet >/dev/null 2>&1; then
# The zero tablet is up. Try to shutdown 0-2 tablet + mysqlctl
for i in 0 1 2; do
uid=$(($tablet + $i))
@@ -29,7 +29,7 @@ for tablet in 100 200 300; do
echo "Shutting down mysql zone1-$uid"
CELL=zone1 TABLET_UID=$uid ../common/scripts/mysqlctl-down.sh
echo "Removing tablet directory zone1-$uid"
- vtctlclient DeleteTablet -- --allow_primary=true zone1-$uid
+ vtctldclient DeleteTablets --allow-primary zone1-$uid
rm -Rf $VTDATAROOT/vt_0000000$uid
done
fi
diff --git a/examples/backups/take_backups.sh b/examples/backups/take_backups.sh
index dc1b049c9c3..85935edd2ce 100755
--- a/examples/backups/take_backups.sh
+++ b/examples/backups/take_backups.sh
@@ -20,5 +20,5 @@
source ../common/env.sh
for shard in "customer/-80" "customer/80-" "commerce/0"; do
- vtctlclient BackupShard "${shard}" || fail "Failed to backup shard: ${shard}"
+ vtctldclient BackupShard "${shard}" || fail "Failed to backup shard: ${shard}"
done
diff --git a/examples/backups/upgrade_cluster.sh b/examples/backups/upgrade_cluster.sh
index 0144dc94579..3e831a14360 100755
--- a/examples/backups/upgrade_cluster.sh
+++ b/examples/backups/upgrade_cluster.sh
@@ -39,7 +39,7 @@ for i in 201 202; do
echo "Shutting down mysql zone1-$i"
CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-down.sh
echo "Removing tablet directory zone1-$i"
- vtctlclient DeleteTablet -- --allow_primary=true zone1-$i
+ vtctldclient DeleteTablets --allow-primary zone1-$i
rm -Rf $VTDATAROOT/vt_0000000$i
echo "Starting tablet zone1-$i again"
CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh
@@ -52,7 +52,7 @@ for i in 301 302; do
echo "Shutting down mysql zone1-$i"
CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-down.sh
echo "Removing tablet directory zone1-$i"
- vtctlclient DeleteTablet -- --allow_primary=true zone1-$i
+ vtctldclient DeleteTablets --allow-primary zone1-$i
rm -Rf $VTDATAROOT/vt_0000000$i
echo "Starting tablet zone1-$i again"
CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh
@@ -94,4 +94,4 @@ SHARD=-80 CELL=zone1 KEYSPACE=customer TABLET_UID=200 ../common/scripts/vttablet
echo "Restarting tablet zone1-300"
CELL=zone1 TABLET_UID=300 ../common/scripts/vttablet-down.sh
-SHARD=80- CELL=zone1 KEYSPACE=customer TABLET_UID=300 ../common/scripts/vttablet-up.sh
\ No newline at end of file
+SHARD=80- CELL=zone1 KEYSPACE=customer TABLET_UID=300 ../common/scripts/vttablet-up.sh
diff --git a/examples/common/env.sh b/examples/common/env.sh
index dddb40bd08d..24f200b24ef 100644
--- a/examples/common/env.sh
+++ b/examples/common/env.sh
@@ -26,16 +26,13 @@ fi
# mysqld might be in /usr/sbin which will not be in the default PATH
PATH="/usr/sbin:$PATH"
-for binary in mysqld etcd etcdctl curl vtctlclient vttablet vtgate vtctld mysqlctl; do
+for binary in mysqld etcd etcdctl curl vtctldclient vttablet vtgate vtctld mysqlctl; do
command -v "$binary" > /dev/null || fail "${binary} is not installed in PATH. See https://vitess.io/docs/get-started/local/ for install instructions."
done;
-# vtctlclient has a separate alias setup below
+# vtctldclient has a separate alias setup below
for binary in vttablet vtgate vtctld mysqlctl vtorc vtctl; do
- majorVersion=$("${binary}" --version | sed -rn 's/^Version:[[:space:]]*([[:digit:]]+)\.[[:digit:]]+\.[[:digit:]]+.*/\1/p')
- if [[ $majorVersion -gt "16" ]]; then
- alias $binary="$binary --config-file-not-found-handling=ignore"
- fi
+ alias $binary="$binary --config-file-not-found-handling=ignore"
done;
if [ "${TOPO}" = "zk2" ]; then
@@ -59,13 +56,6 @@ if [ "${TOPO}" = "zk2" ]; then
TOPOLOGY_FLAGS="--topo_implementation zk2 --topo_global_server_address ${ZK_SERVER} --topo_global_root /vitess/global"
mkdir -p "${VTDATAROOT}/tmp"
-elif [ "${TOPO}" = "k8s" ]; then
- # Set topology environment parameters.
- K8S_ADDR="localhost"
- K8S_PORT="8443"
- K8S_KUBECONFIG=$VTDATAROOT/tmp/k8s.kubeconfig
- # shellcheck disable=SC2034
- TOPOLOGY_FLAGS="--topo_implementation k8s --topo_k8s_kubeconfig ${K8S_KUBECONFIG} --topo_global_server_address ${K8S_ADDR}:${K8S_PORT} --topo_global_root /vitess/global"
elif [ "${TOPO}" = "consul" ]; then
# Set up topology environment parameters.
CONSUL_SERVER=127.0.0.1
diff --git a/examples/common/lib/utils.sh b/examples/common/lib/utils.sh
index 66af7d31bd7..140e58147e1 100644
--- a/examples/common/lib/utils.sh
+++ b/examples/common/lib/utils.sh
@@ -108,13 +108,13 @@ function wait_for_shard_vreplication_engine() {
local wait_secs=90
for _ in $(seq 1 ${wait_secs}); do
- if vtctlclient --server=localhost:15999 Workflow -- "${keyspace}" listall &>/dev/null; then
+ if vtctldclient --server=localhost:15999 workflow --keyspace "${keyspace}" list &>/dev/null; then
break
fi
sleep 1
done;
- if ! vtctlclient --server=localhost:15999 Workflow -- "${keyspace}" listall &>/dev/null; then
+ if ! vtctldclient --server=localhost:15999 workflow --keyspace "${keyspace}" list &>/dev/null; then
fail "Timed out after ${wait_secs} seconds waiting for the primary tablet's VReplication engine to open in ${keyspace}/${shard}"
fi
}
@@ -139,6 +139,35 @@ function wait_for_healthy_shard() {
wait_for_shard_vreplication_engine "${keyspace}" "${shard}"
}
+# Wait for a workflow to reach the running state. Example:
+# wait_for_workflow_running customer customer2customer
+function wait_for_workflow_running() {
+ if [[ -z ${1} || -z ${2} ]]; then
+ fail "A keyspace and workflow must be specified when waiting for a workflow to reach the running state"
+ fi
+
+ local keyspace=${1}
+ local workflow=${2}
+ local wait_secs=90
+ local result=""
+
+ echo "Waiting for the ${workflow} workflow in the ${keyspace} keyspace to finish the copy phase..."
+
+ for _ in $(seq 1 ${wait_secs}); do
+ result=$(vtctldclient Workflow --keyspace="${keyspace}" show --workflow="${workflow}" 2>/dev/null | grep "Copy phase completed")
+ if [[ ${result} != "" ]]; then
+ break
+ fi
+ sleep 1
+ done;
+
+ if [[ ${result} == "" ]]; then
+    fail "Timed out after ${wait_secs} seconds waiting for the ${workflow} workflow in the ${keyspace} keyspace to finish the copy phase"
+ fi
+
+ echo "The ${workflow} workflow in the ${keyspace} keyspace is now running. $(sed -rn 's/.*"(Copy phase.*)".*/\1/p' <<< "${result}")."
+}
+
# Stop the specified binary name using the provided PID file.
# Example:
# stop_process "vtadmin-web" "$VTDATAROOT/tmp/vtadmin-web.pid"
diff --git a/examples/common/scripts/etcd-up.sh b/examples/common/scripts/etcd-up.sh
index f2bba3e1470..937db27ea42 100755
--- a/examples/common/scripts/etcd-up.sh
+++ b/examples/common/scripts/etcd-up.sh
@@ -24,7 +24,7 @@ export ETCDCTL_API=2
# Check that etcd is not already running
curl "http://${ETCD_SERVER}" > /dev/null 2>&1 && fail "etcd is already running. Exiting."
-etcd --enable-v2=true --data-dir "${VTDATAROOT}/etcd/" --listen-client-urls "http://${ETCD_SERVER}" --advertise-client-urls "http://${ETCD_SERVER}" > "${VTDATAROOT}"/tmp/etcd.out 2>&1 &
+etcd --data-dir "${VTDATAROOT}/etcd/" --listen-client-urls "http://${ETCD_SERVER}" --advertise-client-urls "http://${ETCD_SERVER}" > "${VTDATAROOT}"/tmp/etcd.out 2>&1 &
PID=$!
echo $PID > "${VTDATAROOT}/tmp/etcd.pid"
sleep 5
@@ -56,6 +56,6 @@ vtctl $TOPOLOGY_FLAGS VtctldCommand AddCellInfo \
$cell
set -e
-echo "etcd start done..."
+echo "etcd is running!"
diff --git a/examples/common/scripts/k3s-down.sh b/examples/common/scripts/k3s-down.sh
deleted file mode 100755
index 195b024bf91..00000000000
--- a/examples/common/scripts/k3s-down.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is an example script that stops the k3s server started by k3s-up.sh.
-
-set -e
-
-source "$(dirname "${BASH_SOURCE[0]:-$0}")/../env.sh"
-
-# Stop K3s server.
-echo "Stopping k3s server..."
-
-pid=`cat $VTDATAROOT/tmp/k3s.pid`
-echo "Stopping k3s..."
-kill -9 $pid
diff --git a/examples/common/scripts/k3s-up.sh b/examples/common/scripts/k3s-up.sh
deleted file mode 100755
index 7c85cb0ac07..00000000000
--- a/examples/common/scripts/k3s-up.sh
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/bin/bash
-
-# Copyright 2019 The Vitess Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This is an example script that creates a Kubernetes api for topo use by running k3s
-
-set -e
-cell=${CELL:-'test'}
-
-script_dir="$(dirname "${BASH_SOURCE[0]:-$0}")"
-source "${script_dir}/../env.sh"
-
-case $(uname) in
- Linux) ;;
- *) echo "WARNING: unsupported platform. K3s only supports running on Linux, the k8s topology is available for local examples."; exit 1;;
-esac
-
-case $(uname -m) in
- aarch64) ;;
- x86_64) ;;
- *) echo "ERROR: unsupported architecture, the k8s topology is not available for local examples."; exit 1;;
-esac
-
-k3s server --disable-agent --data-dir "${VTDATAROOT}/k3s/" --https-listen-port "${K8S_PORT}" --write-kubeconfig "${K8S_KUBECONFIG}" > "${VTDATAROOT}"/tmp/k3s.out 2>&1 &
-PID=$!
-echo $PID > "${VTDATAROOT}/tmp/k3s.pid"
-disown -a
-echo "Waiting for k3s server to start"
-sleep 15
-
-# Use k3s built-in kubectl with custom config
-KUBECTL="k3s kubectl --kubeconfig=${K8S_KUBECONFIG}"
-
-# Create the CRD for vitesstopologynodes
-$KUBECTL create -f "${script_dir}/../../../go/vt/topo/k8stopo/VitessTopoNodes-crd.yaml"
-
-# Add the CellInfo description for the cell
-set +e
-echo "add $cell CellInfo"
-vtctl $TOPOLOGY_FLAGS VtctldCommand AddCellInfo \
- --root /vitess/$cell \
- $cell
-set -e
-
-echo "k3s start done..."
diff --git a/examples/common/scripts/mysqlctl-up.sh b/examples/common/scripts/mysqlctl-up.sh
index d9df27ccdc0..ff20cae5793 100755
--- a/examples/common/scripts/mysqlctl-up.sh
+++ b/examples/common/scripts/mysqlctl-up.sh
@@ -40,3 +40,5 @@ mysqlctl \
--tablet_uid $uid \
--mysql_port $mysql_port \
$action
+
+echo -e "MySQL for tablet $alias is running!"
diff --git a/examples/common/scripts/vtadmin-up.sh b/examples/common/scripts/vtadmin-up.sh
index 2cf4b578332..7fb5d759254 100755
--- a/examples/common/scripts/vtadmin-up.sh
+++ b/examples/common/scripts/vtadmin-up.sh
@@ -1,5 +1,19 @@
#!/bin/bash
+# Copyright 2023 The Vitess Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
script_dir="$(dirname "${BASH_SOURCE[0]:-$0}")"
source "${script_dir}/../env.sh"
@@ -78,6 +92,7 @@ VITE_VTADMIN_API_ADDRESS="http://${hostname}:${vtadmin_api_port}" \
VITE_ENABLE_EXPERIMENTAL_TABLET_DEBUG_VARS="true" \
npm run --prefix "$web_dir" build
+[[ ! -d "$web_dir/build" ]] && fail "Please make sure the VTAdmin files are built in $web_dir/build, using 'make build'"
"${web_dir}/node_modules/.bin/serve" --no-clipboard -l $vtadmin_web_port -s "${web_dir}/build" \
> "${log_dir}/vtadmin-web.out" 2>&1 &
diff --git a/examples/common/scripts/vtgate-up.sh b/examples/common/scripts/vtgate-up.sh
index 07ac2ceee7b..f94c3c32ca0 100755
--- a/examples/common/scripts/vtgate-up.sh
+++ b/examples/common/scripts/vtgate-up.sh
@@ -24,7 +24,7 @@ grpc_port=15991
mysql_server_port=15306
mysql_server_socket_path="/tmp/mysql.sock"
-# Start vtgate.
+echo "Starting vtgate..."
# shellcheck disable=SC2086
vtgate \
$TOPOLOGY_FLAGS \
@@ -40,12 +40,12 @@ vtgate \
--service_map 'grpc-vtgateservice' \
--pid_file $VTDATAROOT/tmp/vtgate.pid \
--mysql_auth_server_config_file $VTROOT/config/user.json \
+ --enable_buffer \
> $VTDATAROOT/tmp/vtgate.out 2>&1 &
# Block waiting for vtgate to be listening
# Not the same as healthy
-echo "Waiting for vtgate to be up..."
while true; do
curl -I "http://$hostname:$web_port/debug/status" >/dev/null 2>&1 && break
sleep 0.1
diff --git a/examples/common/scripts/vtorc-up.sh b/examples/common/scripts/vtorc-up.sh
index 66a826da288..23ca4e62b48 100755
--- a/examples/common/scripts/vtorc-up.sh
+++ b/examples/common/scripts/vtorc-up.sh
@@ -6,6 +6,7 @@ source "${script_dir}/../env.sh"
log_dir="${VTDATAROOT}/tmp"
port=16000
+echo "Starting vtorc..."
vtorc \
$TOPOLOGY_FLAGS \
--logtostderr \
diff --git a/examples/common/scripts/vttablet-up.sh b/examples/common/scripts/vttablet-up.sh
index 90a4239560a..812093666f2 100755
--- a/examples/common/scripts/vttablet-up.sh
+++ b/examples/common/scripts/vttablet-up.sh
@@ -53,9 +53,10 @@ vttablet \
--grpc_port $grpc_port \
--service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream' \
--pid_file $VTDATAROOT/$tablet_dir/vttablet.pid \
- --vtctld_addr http://$hostname:$vtctld_web_port/ \
--queryserver-config-max-result-size 10000000 \
- --heartbeat_enable --heartbeat_interval=250ms --heartbeat_on_demand_duration=5s \
+ --heartbeat_enable \
+ --heartbeat_interval=250ms \
+ --heartbeat_on_demand_duration=5s \
> $VTDATAROOT/$tablet_dir/vttablet.out 2>&1 &
# Block waiting for the tablet to be listening
@@ -68,3 +69,5 @@ done
# check one last time
curl -I "http://$hostname:$port/debug/status" || fail "tablet could not be started!"
+
+echo -e "vttablet for $alias is running!"
diff --git a/examples/compose/README.md b/examples/compose/README.md
index 0070697ca9d..8d20f360620 100644
--- a/examples/compose/README.md
+++ b/examples/compose/README.md
@@ -222,17 +222,7 @@ The vreplication container included performs the following actions;
4. Prints out helpful debug information for you.
```
vitess/examples/compose$ docker-compose logs -f vreplication
-vreplication_1 | + /vt/bin/vtctlclient --server vtctld:15999 VReplicationExec local-0000000101 'insert into _vt.vreplication (db_name, source, pos, max_tps, max_replication_lag, tablet_types, time_updated, transaction_timestamp, state) values('\''commerce'\'', '\''keyspace:\"ext_commerce\" shard:\"0\" filter: > on_ddl:EXEC_IGNORE '\'', '\'''\'', 9999, 9999, '\''primary'\'', 0, 0, '\''Running'\'')'
-vreplication_1 | + /vt/bin/vtctlclient --server vtctld:15999 VReplicationExec local-0000000101 'select * from _vt.vreplication'
-vreplication_1 | +----+----------+--------------------------------+-----+----------+---------+---------------------+------+--------------+--------------+-----------------------+---------+---------+----------+
-vreplication_1 | | id | workflow | source | pos | stop_pos | max_tps | max_replication_lag | cell | tablet_types | time_updated | transaction_timestamp | state | message | db_name |
-vreplication_1 | +----+----------+--------------------------------+-----+----------+---------+---------------------+------+--------------+--------------+-----------------------+---------+---------+----------+
-vreplication_1 | | 1 | | keyspace:"ext_commerce" | | | 9999 | 9999 | | primary | 0 | 0 | Running | | commerce |
-vreplication_1 | | | | shard:"0" | | | | | | | | | | | |
-vreplication_1 | | | | filter: > | | | | | | | | | | | |
-vreplication_1 | | | | on_ddl:EXEC_IGNORE | | | | | | | | | | | |
-vreplication_1 | +----+----------+--------------------------------+-----+----------+---------+---------------------+------+--------------+--------------+-----------------------+---------+---------+----------+
-compose_vreplication_1 exited with code 0
+...
```
### Connect to vgate and run queries
diff --git a/examples/compose/client.go b/examples/compose/client.go
index a4933f21833..8beaef683cd 100644
--- a/examples/compose/client.go
+++ b/examples/compose/client.go
@@ -42,7 +42,6 @@ var (
func main() {
pflag.Parse()
- rand.Seed(time.Now().UnixNano())
// Connect to vtgate.
db, err := vitessdriver.Open(*server, "@primary")
diff --git a/examples/compose/docker-compose.beginners.yml b/examples/compose/docker-compose.beginners.yml
index 1afa23529bd..7f35ddbb034 100644
--- a/examples/compose/docker-compose.beginners.yml
+++ b/examples/compose/docker-compose.beginners.yml
@@ -58,7 +58,7 @@ services:
- "3306"
vtctld:
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- "15000:$WEB_PORT"
- "$GRPC_PORT"
@@ -81,7 +81,7 @@ services:
condition: service_healthy
vtgate:
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- "15099:$WEB_PORT"
- "$GRPC_PORT"
@@ -111,7 +111,7 @@ services:
condition: service_healthy
schemaload:
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
command:
- sh
- -c
@@ -144,12 +144,12 @@ services:
environment:
- KEYSPACES=$KEYSPACE
- GRPC_PORT=15999
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
volumes:
- .:/script
vttablet100:
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- "15100:$WEB_PORT"
- "$GRPC_PORT"
@@ -181,7 +181,7 @@ services:
retries: 15
vttablet101:
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- "15101:$WEB_PORT"
- "$GRPC_PORT"
@@ -213,7 +213,7 @@ services:
retries: 15
vttablet102:
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- "15102:$WEB_PORT"
- "$GRPC_PORT"
@@ -245,7 +245,7 @@ services:
retries: 15
vttablet103:
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- "15103:$WEB_PORT"
- "$GRPC_PORT"
@@ -277,7 +277,7 @@ services:
retries: 15
vtorc:
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
command: ["sh", "-c", "/script/vtorc-up.sh"]
depends_on:
- vtctld
@@ -307,7 +307,7 @@ services:
retries: 15
vreplication:
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
volumes:
- ".:/script"
environment:
diff --git a/examples/compose/docker-compose.yml b/examples/compose/docker-compose.yml
index 6692d0a0575..aa71b1ba82c 100644
--- a/examples/compose/docker-compose.yml
+++ b/examples/compose/docker-compose.yml
@@ -75,7 +75,7 @@ services:
- SCHEMA_FILES=lookup_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
volumes:
- .:/script
schemaload_test_keyspace:
@@ -101,7 +101,7 @@ services:
- SCHEMA_FILES=test_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
volumes:
- .:/script
set_keyspace_durability_policy:
@@ -115,7 +115,7 @@ services:
environment:
- KEYSPACES=test_keyspace lookup_keyspace
- GRPC_PORT=15999
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
volumes:
- .:/script
vreplication:
@@ -129,7 +129,7 @@ services:
- TOPOLOGY_FLAGS=--topo_implementation consul --topo_global_server_address consul1:8500
--topo_global_root vitess/global
- EXTERNAL_DB=0
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
volumes:
- .:/script
vtctld:
@@ -143,7 +143,7 @@ services:
depends_on:
external_db_host:
condition: service_healthy
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- 15000:8080
- "15999"
@@ -160,7 +160,7 @@ services:
--normalize_queries=true '
depends_on:
- vtctld
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- 15099:8080
- "15999"
@@ -182,7 +182,7 @@ services:
- EXTERNAL_DB=0
- DB_USER=
- DB_PASS=
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- 13000:8080
volumes:
@@ -217,7 +217,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- 15101:8080
- "15999"
@@ -254,7 +254,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- 15102:8080
- "15999"
@@ -291,7 +291,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- 15201:8080
- "15999"
@@ -328,7 +328,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- 15202:8080
- "15999"
@@ -365,7 +365,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- 15301:8080
- "15999"
@@ -402,7 +402,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- 15302:8080
- "15999"
diff --git a/examples/compose/externaldb_vreplication.sh b/examples/compose/externaldb_vreplication.sh
index 1138909aa13..3a5291b18b7 100755
--- a/examples/compose/externaldb_vreplication.sh
+++ b/examples/compose/externaldb_vreplication.sh
@@ -19,19 +19,19 @@ set -ex
VTCTLD_SERVER=${VTCTLD_SERVER:-'vtctld:15999'}
# Wait until source and destination primaries are available
-until (/vt/bin/vtctlclient --server $VTCTLD_SERVER ListAllTablets | grep "ext_" | grep "primary" ); do
+until (/vt/bin/vtctldclient --server $VTCTLD_SERVER GetTablets | grep "ext_" | grep "primary" ); do
echo 'waiting for external primary..';
sleep 1;
done
-until (/vt/bin/vtctlclient --server $VTCTLD_SERVER ListAllTablets | grep -v "ext_" | grep "primary" ); do
+until (/vt/bin/vtctldclient --server $VTCTLD_SERVER GetTablets | grep -v "ext_" | grep "primary" ); do
echo 'waiting for managed primary..';
sleep 1;
done
# Get source and destination tablet and shard information
-TABLET_INFO=$(/vt/bin/vtctlclient --server $VTCTLD_SERVER ListAllTablets)
+TABLET_INFO=$(/vt/bin/vtctldclient --server $VTCTLD_SERVER GetTablets)
source_alias=$(echo "$TABLET_INFO "| grep "ext_" | grep "primary" | awk '{ print $1 }')
dest_alias=$(echo "$TABLET_INFO "| grep -v "ext_" | grep "primary" | awk '{ print $1 }')
source_keyspace=$(echo "$TABLET_INFO "| grep "ext_" | grep "primary" | awk '{ print $2 }')
@@ -43,33 +43,27 @@ dest_tablet=$(echo "$TABLET_INFO "| grep -v "ext_" | grep "primary" | awk '{ pri
# Disable foreign_key checks on destination
-/vt/bin/vtctlclient --server $VTCTLD_SERVER ExecuteFetchAsDba $dest_alias 'SET GLOBAL FOREIGN_KEY_CHECKS=0;'
+/vt/bin/vtctldclient --server $VTCTLD_SERVER ExecuteFetchAsDBA $dest_alias 'SET GLOBAL FOREIGN_KEY_CHECKS=0;'
# Get source_sql mode
-source_sql_mode=$(/vt/bin/vtctlclient --server $VTCTLD_SERVER ExecuteFetchAsDba $source_alias 'SELECT @@GLOBAL.sql_mode' | awk 'NR==4 {print $2}')
+source_sql_mode=$(/vt/bin/vtctldclient --server $VTCTLD_SERVER ExecuteFetchAsDBA $source_alias 'SELECT @@GLOBAL.sql_mode' | awk 'NR==4 {print $2}')
# Apply source sql_mode to destination
# The intention is to avoid replication errors
-/vt/bin/vtctlclient --server $VTCTLD_SERVER ExecuteFetchAsDba $dest_alias "SET GLOBAL sql_mode='$source_sql_mode';"
+/vt/bin/vtctldclient --server $VTCTLD_SERVER ExecuteFetchAsDBA $dest_alias "SET GLOBAL sql_mode='$source_sql_mode';"
# Verify sql_mode matches
-[ $source_sql_mode == $(/vt/bin/vtctlclient --server $VTCTLD_SERVER ExecuteFetchAsDba $dest_alias 'SELECT @@GLOBAL.sql_mode' | awk 'NR==4 {print $2}') ] && \
+[ $source_sql_mode == $(/vt/bin/vtctldclient --server $VTCTLD_SERVER ExecuteFetchAsDBA $dest_alias 'SELECT @@GLOBAL.sql_mode' | awk 'NR==4 {print $2}') ] && \
echo "Source and Destination sql_mode Match." || echo "sql_mode MisMatch"
-until /vt/bin/vtctlclient --server $VTCTLD_SERVER GetSchema $dest_alias; do
+until /vt/bin/vtctldclient --server $VTCTLD_SERVER GetSchema $dest_alias; do
echo "Waiting for destination schema to be ready..";
sleep 3;
done
-# Copy schema from source to destination shard
-/vt/bin/vtctlclient --server $VTCTLD_SERVER CopySchemaShard $source_tablet $dest_tablet || true
-
-# Verify schema
-/vt/bin/vtctlclient --server $VTCTLD_SERVER GetSchema $dest_alias
-
-# Start vreplication
-/vt/bin/vtctlclient --server $VTCTLD_SERVER VReplicationExec $dest_alias 'insert into _vt.vreplication (db_name, source, pos, max_tps, max_replication_lag, tablet_types, time_updated, transaction_timestamp, state) values('"'"''"$dest_keyspace"''"'"', '"'"'keyspace:\"'"$source_keyspace"'\" shard:\"'"$source_shard"'\" filter: > on_ddl:EXEC_IGNORE '"'"', '"'"''"'"', 9999, 9999, '"'"'primary'"'"', 0, 0, '"'"'Running'"'"')'
+# Start vreplication workflow
+/vt/bin/vtctldclient --server $VTCTLD_SERVER MoveTables --workflow ext_commerce2commerce --target-keyspace $dest_keyspace create --source-keyspace $source_keyspace --all-tables
# Check vreplication status
-/vt/bin/vtctlclient --server $VTCTLD_SERVER VReplicationExec $dest_alias 'select * from _vt.vreplication'
+/vt/bin/vtctldclient --server $VTCTLD_SERVER MoveTables --workflow ext_commerce2commerce --target-keyspace $dest_keyspace show
diff --git a/examples/compose/lvtctl.sh b/examples/compose/lvtctl.sh
index 94d4e236395..0b4f16b70c9 100755
--- a/examples/compose/lvtctl.sh
+++ b/examples/compose/lvtctl.sh
@@ -20,5 +20,5 @@ if [[ "$OSTYPE" == "msys" ]]; then
tty=winpty
fi
-# This is a convenience script to run vtctlclient against the local example.
-exec $tty docker-compose exec ${CS:-vtctld} /vt/bin/vtctlclient --server vtctld:15999 "$@"
+# This is a convenience script to run vtctldclient against the local example.
+exec $tty docker-compose exec ${CS:-vtctld} /vt/bin/vtctldclient --server vtctld:15999 "$@"
diff --git a/examples/compose/schemaload.sh b/examples/compose/schemaload.sh
index 0c27dd27026..607c791ce69 100755
--- a/examples/compose/schemaload.sh
+++ b/examples/compose/schemaload.sh
@@ -26,23 +26,23 @@ sleep $sleeptime
if [ ! -f schema_run ]; then
while true; do
- vtctlclient --server vtctld:$GRPC_PORT GetTablet $targettab && break
+ vtctldclient --server vtctld:$GRPC_PORT GetTablet $targettab && break
sleep 1
done
if [ "$external_db" = "0" ]; then
for schema_file in $schema_files; do
echo "Applying Schema ${schema_file} to ${KEYSPACE}"
- vtctlclient --server vtctld:$GRPC_PORT -- ApplySchema --sql-file /script/tables/${schema_file} $KEYSPACE || \
- vtctlclient --server vtctld:$GRPC_PORT -- ApplySchema --sql "$(cat /script/tables/${schema_file})" $KEYSPACE || true
+ vtctldclient --server vtctld:$GRPC_PORT ApplySchema --sql-file /script/tables/${schema_file} $KEYSPACE || \
+ vtctldclient --server vtctld:$GRPC_PORT ApplySchema --sql "$(cat /script/tables/${schema_file})" $KEYSPACE || true
done
fi
echo "Applying VSchema ${vschema_file} to ${KEYSPACE}"
- vtctlclient --server vtctld:$GRPC_PORT -- ApplyVSchema --vschema_file /script/${vschema_file} $KEYSPACE || \
- vtctlclient --server vtctld:$GRPC_PORT -- ApplyVSchema --vschema "$(cat /script/${vschema_file})" $KEYSPACE
+ vtctldclient --server vtctld:$GRPC_PORT ApplyVSchema --vschema-file /script/${vschema_file} $KEYSPACE || \
+ vtctldclient --server vtctld:$GRPC_PORT ApplyVSchema --vschema "$(cat /script/${vschema_file})" $KEYSPACE
echo "List All Tablets"
- vtctlclient --server vtctld:$GRPC_PORT ListAllTablets
+ vtctldclient --server vtctld:$GRPC_PORT GetTablets
if [ -n "$load_file" ]; then
# vtgate can take a REALLY long time to come up fully
diff --git a/examples/compose/vtcompose/docker-compose.test.yml b/examples/compose/vtcompose/docker-compose.test.yml
index 4fdd8363004..ad2995b3a6c 100644
--- a/examples/compose/vtcompose/docker-compose.test.yml
+++ b/examples/compose/vtcompose/docker-compose.test.yml
@@ -79,7 +79,7 @@ services:
- SCHEMA_FILES=test_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
volumes:
- .:/script
schemaload_unsharded_keyspace:
@@ -103,7 +103,7 @@ services:
- SCHEMA_FILES=unsharded_keyspace_schema_file.sql
- POST_LOAD_FILE=
- EXTERNAL_DB=0
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
volumes:
- .:/script
set_keyspace_durability_policy_test_keyspace:
@@ -117,7 +117,7 @@ services:
environment:
- GRPC_PORT=15999
- KEYSPACES=test_keyspace
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
volumes:
- .:/script
set_keyspace_durability_policy_unsharded_keyspace:
@@ -130,7 +130,7 @@ services:
environment:
- GRPC_PORT=15999
- KEYSPACES=unsharded_keyspace
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
volumes:
- .:/script
vreplication:
@@ -144,7 +144,7 @@ services:
- TOPOLOGY_FLAGS=--topo_implementation consul --topo_global_server_address consul1:8500
--topo_global_root vitess/global
- EXTERNAL_DB=0
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
volumes:
- .:/script
vtctld:
@@ -159,7 +159,7 @@ services:
depends_on:
external_db_host:
condition: service_healthy
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- 15000:8080
- "15999"
@@ -176,7 +176,7 @@ services:
''grpc-vtgateservice'' --normalize_queries=true '
depends_on:
- vtctld
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- 15099:8080
- "15999"
@@ -199,7 +199,7 @@ services:
- EXTERNAL_DB=0
- DB_USER=
- DB_PASS=
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- 13000:8080
volumes:
@@ -234,7 +234,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- 15101:8080
- "15999"
@@ -271,7 +271,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- 15102:8080
- "15999"
@@ -308,7 +308,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- 15201:8080
- "15999"
@@ -345,7 +345,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- 15202:8080
- "15999"
@@ -382,7 +382,7 @@ services:
- CMD-SHELL
- curl -s --fail --show-error localhost:8080/debug/health
timeout: 10s
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- 15301:8080
- "15999"
diff --git a/examples/compose/vtcompose/vtcompose.go b/examples/compose/vtcompose/vtcompose.go
index 2b8ad0f5cf5..b054b6a4d2e 100644
--- a/examples/compose/vtcompose/vtcompose.go
+++ b/examples/compose/vtcompose/vtcompose.go
@@ -533,8 +533,8 @@ func generateDefaultShard(tabAlias int, shard string, keyspaceData keyspaceInfo,
- op: add
path: /services/init_shard_primary%[2]d
value:
- image: vitess/lite:v17.0.2
- command: ["sh", "-c", "/vt/bin/vtctlclient %[5]s InitShardPrimary -force %[4]s/%[3]s %[6]s-%[2]d "]
+ image: vitess/lite:v18.0.0
+ command: ["sh", "-c", "/vt/bin/vtctldclient %[5]s InitShardPrimary --force %[4]s/%[3]s %[6]s-%[2]d "]
%[1]s
`, dependsOn, aliases[0], shard, keyspaceData.keyspace, opts.topologyFlags, opts.cell)
}
@@ -565,7 +565,7 @@ func generateExternalPrimary(
- op: add
path: /services/vttablet%[1]d
value:
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- "15%[1]d:%[3]d"
- "%[4]d"
@@ -627,7 +627,7 @@ func generateDefaultTablet(tabAlias int, shard, role, keyspace string, dbInfo ex
- op: add
path: /services/vttablet%[1]d
value:
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- "15%[1]d:%[4]d"
- "%[5]d"
@@ -665,7 +665,7 @@ func generateVtctld(opts vtOptions) string {
- op: add
path: /services/vtctld
value:
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- "15000:%[1]d"
- "%[2]d"
@@ -696,7 +696,7 @@ func generateVtgate(opts vtOptions) string {
- op: add
path: /services/vtgate
value:
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
ports:
- "15099:%[1]d"
- "%[2]d"
@@ -738,7 +738,7 @@ func generateVTOrc(dbInfo externalDbInfo, keyspaceInfoMap map[string]keyspaceInf
- op: add
path: /services/vtorc
value:
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
volumes:
- ".:/script"
environment:
@@ -763,7 +763,7 @@ func generateVreplication(dbInfo externalDbInfo, opts vtOptions) string {
- op: add
path: /services/vreplication
value:
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
volumes:
- ".:/script"
environment:
@@ -791,7 +791,7 @@ func generateSetKeyspaceDurabilityPolicy(
- op: add
path: /services/set_keyspace_durability_policy_%[3]s
value:
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
volumes:
- ".:/script"
environment:
@@ -828,7 +828,7 @@ func generateSchemaload(
- op: add
path: /services/schemaload_%[7]s
value:
- image: vitess/lite:v17.0.2
+ image: vitess/lite:v18.0.0
volumes:
- ".:/script"
environment:
diff --git a/examples/compose/vttablet-up.sh b/examples/compose/vttablet-up.sh
index 4978840866c..eeae54251f0 100755
--- a/examples/compose/vttablet-up.sh
+++ b/examples/compose/vttablet-up.sh
@@ -108,7 +108,7 @@ sleep $sleeptime
# Create the cell
# https://vitess.io/blog/2020-04-27-life-of-a-cluster/
-$VTROOT/bin/vtctlclient --server vtctld:$GRPC_PORT -- AddCellInfo --root vitess/$CELL --server_address consul1:8500 $CELL || true
+$VTROOT/bin/vtctldclient --server vtctld:$GRPC_PORT AddCellInfo --root vitess/$CELL --server-address consul1:8500 $CELL || true
#Populate external db conditional args
if [ $tablet_role = "externalprimary" ]; then
@@ -154,7 +154,6 @@ exec $VTROOT/bin/vttablet \
--port $web_port \
--grpc_port $grpc_port \
--service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream' \
- --vtctld_addr "http://vtctld:$WEB_PORT/" \
--init_keyspace $keyspace \
--init_shard $shard \
--backup_storage_implementation file \
diff --git a/examples/local/101_initial_cluster.sh b/examples/local/101_initial_cluster.sh
index db4917bdac0..e93ba8ed170 100755
--- a/examples/local/101_initial_cluster.sh
+++ b/examples/local/101_initial_cluster.sh
@@ -31,8 +31,6 @@ SIDECAR_DB_NAME=${SIDECAR_DB_NAME:-"_vt"}
# start topo server
if [ "${TOPO}" = "zk2" ]; then
CELL=zone1 ../common/scripts/zk-up.sh
-elif [ "${TOPO}" = "k8s" ]; then
- CELL=zone1 ../common/scripts/k3s-up.sh
elif [ "${TOPO}" = "consul" ]; then
CELL=zone1 ../common/scripts/consul-up.sh
else
@@ -44,18 +42,29 @@ CELL=zone1 ../common/scripts/vtctld-up.sh
if vtctldclient GetKeyspace commerce > /dev/null 2>&1 ; then
# Keyspace already exists: we could be running this 101 example on an non-empty VTDATAROOT
- vtctldclient --server localhost:15999 SetKeyspaceDurabilityPolicy --durability-policy=semi_sync commerce || fail "Failed to set keyspace durability policy on the commerce keyspace"
+ vtctldclient SetKeyspaceDurabilityPolicy --durability-policy=semi_sync commerce || fail "Failed to set keyspace durability policy on the commerce keyspace"
else
# Create the keyspace with the sidecar database name and set the
# correct durability policy. Please see the comment above for
# more context on using a custom sidecar database name in your
# Vitess clusters.
- vtctldclient --server localhost:15999 CreateKeyspace --sidecar-db-name="${SIDECAR_DB_NAME}" --durability-policy=semi_sync commerce || fail "Failed to create and configure the commerce keyspace"
+ vtctldclient CreateKeyspace --sidecar-db-name="${SIDECAR_DB_NAME}" --durability-policy=semi_sync commerce || fail "Failed to create and configure the commerce keyspace"
fi
+# start mysqlctls for keyspace commerce
+# because MySQL takes time to start, we do this in parallel
+for i in 100 101 102; do
+ CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh &
+done
+
+# without a sleep, the echo below can happen before the echo from mysqlctl-up.sh
+sleep 2
+echo "Waiting for mysqlctls to start..."
+wait
+echo "mysqlctls are running!"
+
# start vttablets for keyspace commerce
for i in 100 101 102; do
- CELL=zone1 TABLET_UID=$i ../common/scripts/mysqlctl-up.sh
CELL=zone1 KEYSPACE=commerce TABLET_UID=$i ../common/scripts/vttablet-up.sh
done
diff --git a/examples/local/202_move_tables.sh b/examples/local/202_move_tables.sh
index f385acb12a3..a4a24150973 100755
--- a/examples/local/202_move_tables.sh
+++ b/examples/local/202_move_tables.sh
@@ -19,4 +19,7 @@
source ../common/env.sh
-vtctlclient MoveTables -- --source commerce --tables 'customer,corder' Create customer.commerce2customer
+vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer create --source-keyspace commerce --tables "customer,corder"
+
+# Wait for the workflow to reach the running state.
+wait_for_workflow_running customer commerce2customer
diff --git a/examples/local/203_switch_reads.sh b/examples/local/203_switch_reads.sh
index 4bca7e4e257..a307c583171 100755
--- a/examples/local/203_switch_reads.sh
+++ b/examples/local/203_switch_reads.sh
@@ -19,4 +19,4 @@
source ../common/env.sh
-vtctlclient MoveTables -- --tablet_types=rdonly,replica SwitchTraffic customer.commerce2customer
+vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer switchtraffic --tablet-types "rdonly,replica"
diff --git a/examples/local/204_switch_writes.sh b/examples/local/204_switch_writes.sh
index 743ca1e2512..8305356a1cf 100755
--- a/examples/local/204_switch_writes.sh
+++ b/examples/local/204_switch_writes.sh
@@ -19,4 +19,4 @@
source ../common/env.sh
-vtctlclient MoveTables -- --tablet_types=primary SwitchTraffic customer.commerce2customer
+vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer switchtraffic --tablet-types primary
diff --git a/examples/local/205_clean_commerce.sh b/examples/local/205_clean_commerce.sh
index 5d307a231d3..127437d1d1b 100755
--- a/examples/local/205_clean_commerce.sh
+++ b/examples/local/205_clean_commerce.sh
@@ -19,5 +19,4 @@
source ../common/env.sh
-vtctlclient MoveTables Complete customer.commerce2customer
-
+vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer complete
diff --git a/examples/local/301_customer_sharded.sh b/examples/local/301_customer_sharded.sh
index ad80cdd98dd..7d1fac41dce 100755
--- a/examples/local/301_customer_sharded.sh
+++ b/examples/local/301_customer_sharded.sh
@@ -26,3 +26,4 @@ vtctldclient ApplySchema --sql-file create_commerce_seq.sql commerce || fail "Fa
vtctldclient ApplyVSchema --vschema-file vschema_commerce_seq.json commerce || fail "Failed to create vschema sequences in the commerce keyspace"
vtctldclient ApplyVSchema --vschema-file vschema_customer_sharded.json customer || fail "Failed to create vschema in sharded customer keyspace"
vtctldclient ApplySchema --sql-file create_customer_sharded.sql customer || fail "Failed to create schema in sharded customer keyspace"
+vtctldclient ApplySchema --sql-file create_commerce_seq.sql customer || fail "Failed to create the sequence schema in the sharded customer keyspace"
diff --git a/examples/local/303_reshard.sh b/examples/local/303_reshard.sh
index ea12987e9ed..5bf36ff7a19 100755
--- a/examples/local/303_reshard.sh
+++ b/examples/local/303_reshard.sh
@@ -19,4 +19,8 @@
source ../common/env.sh
-vtctlclient Reshard -- --source_shards '0' --target_shards '-80,80-' Create customer.cust2cust
+vtctldclient Reshard --workflow cust2cust --target-keyspace customer create --source-shards '0' --target-shards '-80,80-'
+
+# Wait for the workflow to reach the running state.
+wait_for_workflow_running customer cust2cust
+
diff --git a/examples/local/304_switch_reads.sh b/examples/local/304_switch_reads.sh
index 52d6093f4ff..5e4edff7f0d 100755
--- a/examples/local/304_switch_reads.sh
+++ b/examples/local/304_switch_reads.sh
@@ -18,4 +18,4 @@
source ../common/env.sh
-vtctlclient Reshard -- --tablet_types=rdonly,replica SwitchTraffic customer.cust2cust
+vtctldclient Reshard --workflow cust2cust --target-keyspace customer switchtraffic --tablet-types "rdonly,replica"
diff --git a/examples/local/305_switch_writes.sh b/examples/local/305_switch_writes.sh
index 9bbc7ed9ea5..c9bd66b92a5 100755
--- a/examples/local/305_switch_writes.sh
+++ b/examples/local/305_switch_writes.sh
@@ -18,4 +18,5 @@
source ../common/env.sh
-vtctlclient Reshard -- --tablet_types=primary SwitchTraffic customer.cust2cust
+vtctldclient Reshard --workflow cust2cust --target-keyspace customer switchtraffic --tablet-types "primary"
+
diff --git a/examples/local/306_down_shard_0.sh b/examples/local/306_down_shard_0.sh
index db860b3e23c..5c8332f95bc 100755
--- a/examples/local/306_down_shard_0.sh
+++ b/examples/local/306_down_shard_0.sh
@@ -17,7 +17,7 @@
source ../common/env.sh
-vtctlclient Reshard Complete customer.cust2cust
+vtctldclient Reshard --workflow cust2cust --target-keyspace customer complete
for i in 200 201 202; do
CELL=zone1 TABLET_UID=$i ../common/scripts/vttablet-down.sh
diff --git a/examples/local/401_teardown.sh b/examples/local/401_teardown.sh
index ab93f453668..8f3e7844c5a 100755
--- a/examples/local/401_teardown.sh
+++ b/examples/local/401_teardown.sh
@@ -33,8 +33,15 @@ for tablet in 100 200 300 400; do
printf -v alias '%s-%010d' 'zone1' $uid
echo "Shutting down tablet $alias"
CELL=zone1 TABLET_UID=$uid ../common/scripts/vttablet-down.sh
- CELL=zone1 TABLET_UID=$uid ../common/scripts/mysqlctl-down.sh
+ # because MySQL takes time to stop, we do this in parallel
+ CELL=zone1 TABLET_UID=$uid ../common/scripts/mysqlctl-down.sh &
done
+
+ # without a sleep below, we can have the echo happen before the echo of mysqlctl-down.sh
+ sleep 2
+ echo "Waiting for mysqlctls to stop..."
+ wait
+ echo "mysqlctls are stopped!"
fi
done
@@ -42,8 +49,6 @@ done
if [ "${TOPO}" = "zk2" ]; then
CELL=zone1 ../common/scripts/zk-down.sh
-elif [ "${TOPO}" = "k8s" ]; then
- CELL=zone1 ../common/scripts/k3s-down.sh
elif [ "${TOPO}" = "consul" ]; then
CELL=zone1 ../common/scripts/consul-down.sh
else
diff --git a/examples/local/README.md b/examples/local/README.md
index cb846b7c8b1..233cd3cacf5 100644
--- a/examples/local/README.md
+++ b/examples/local/README.md
@@ -19,35 +19,38 @@ mysql --table < ../common/select_commerce_data.sql
./201_customer_tablets.sh
# Initiate move tables
-vtctlclient MoveTables -- --source commerce --tables 'customer,corder' Create customer.commerce2customer
+vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer create --source-keyspace commerce --tables "customer,corder"
# Validate
-vtctlclient VDiff customer.commerce2customer
+vtctldclient vdiff --workflow commerce2customer --target-keyspace customer create
+vtctldclient vdiff --workflow commerce2customer --target-keyspace customer show last
# Cut-over
-vtctlclient MoveTables -- --tablet_types=rdonly,replica SwitchTraffic customer.commerce2customer
-vtctlclient MoveTables -- --tablet_types=primary SwitchTraffic customer.commerce2customer
+vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer switchtraffic --tablet-types "rdonly,replica"
+vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer switchtraffic --tablet-types primary
# Clean-up
-vtctlclient MoveTables Complete customer.commerce2customer
+vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer complete
# Prepare for resharding
./301_customer_sharded.sh
./302_new_shards.sh
# Reshard
-vtctlclient Reshard -- --source_shards '0' --target_shards '-80,80-' Create customer.cust2cust
+vtctldclient Reshard --workflow cust2cust --target-keyspace customer create --source-shards '0' --target-shards '-80,80-'
# Validate
-vtctlclient VDiff customer.cust2cust
+vtctldclient vdiff --workflow cust2cust --target-keyspace customer create
+vtctldclient vdiff --workflow cust2cust --target-keyspace customer show last
# Cut-over
-vtctlclient Reshard -- --tablet_types=rdonly,replica SwitchTraffic customer.cust2cust
-vtctlclient Reshard -- --tablet_types=primary SwitchTraffic customer.cust2cust
+vtctldclient Reshard --workflow cust2cust --target-keyspace customer switchtraffic --tablet-types "rdonly,replica"
+vtctldclient Reshard --workflow cust2cust --target-keyspace customer switchtraffic --tablet-types primary
# Down shard 0
+vtctldclient Reshard --workflow cust2cust --target-keyspace customer complete
./306_down_shard_0.sh
-vtctlclient DeleteShard -- --force --recursive customer/0
+vtctldclient DeleteShards --force --recursive customer/0
# Down cluster
./401_teardown.sh
diff --git a/examples/local/vschema_customer_sharded.json b/examples/local/vschema_customer_sharded.json
index 4163cb7c2f9..a74112e9fc7 100644
--- a/examples/local/vschema_customer_sharded.json
+++ b/examples/local/vschema_customer_sharded.json
@@ -15,12 +15,9 @@
],
"auto_increment": {
"column": "customer_id",
- "sequence": "customer_seq"
+ "sequence": "customer.customer_seq"
}
},
- "messages": {
- "pinned": "00"
- },
"corder": {
"column_vindexes": [
{
@@ -30,8 +27,27 @@
],
"auto_increment": {
"column": "order_id",
- "sequence": "order_seq"
+ "sequence": "customer.order_seq"
}
+ },
+ "messages": {
+ "pinned": "00"
+ },
+ "test": {
+ "column_vindexes": [
+ {
+ "column": "id",
+ "name": "hash"
+ }
+ ]
+ },
+ "customer_seq": {
+ "type": "sequence",
+ "pinned": "00"
+ },
+ "order_seq": {
+ "type": "sequence",
+ "pinned": "00"
}
}
}
diff --git a/examples/operator/101_initial_cluster.yaml b/examples/operator/101_initial_cluster.yaml
index 24831bc9e88..d122c27ff05 100644
--- a/examples/operator/101_initial_cluster.yaml
+++ b/examples/operator/101_initial_cluster.yaml
@@ -8,14 +8,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:v17.0.2
- vtadmin: vitess/vtadmin:v17.0.2
- vtgate: vitess/lite:v17.0.2
- vttablet: vitess/lite:v17.0.2
- vtbackup: vitess/lite:v17.0.2
- vtorc: vitess/lite:v17.0.2
+ vtctld: vitess/lite:v18.0.0
+ vtadmin: vitess/vtadmin:v18.0.0
+ vtgate: vitess/lite:v18.0.0
+ vttablet: vitess/lite:v18.0.0
+ vtbackup: vitess/lite:v18.0.0
+ vtorc: vitess/lite:v18.0.0
mysqld:
- mysql80Compatible: vitess/lite:v17.0.2
+ mysql80Compatible: vitess/lite:v18.0.0
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
diff --git a/examples/operator/201_customer_tablets.yaml b/examples/operator/201_customer_tablets.yaml
index 1f93549147a..b53aa44c292 100644
--- a/examples/operator/201_customer_tablets.yaml
+++ b/examples/operator/201_customer_tablets.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:v17.0.2
- vtadmin: vitess/vtadmin:v17.0.2
- vtgate: vitess/lite:v17.0.2
- vttablet: vitess/lite:v17.0.2
- vtbackup: vitess/lite:v17.0.2
- vtorc: vitess/lite:v17.0.2
+ vtctld: vitess/lite:v18.0.0
+ vtadmin: vitess/vtadmin:v18.0.0
+ vtgate: vitess/lite:v18.0.0
+ vttablet: vitess/lite:v18.0.0
+ vtbackup: vitess/lite:v18.0.0
+ vtorc: vitess/lite:v18.0.0
mysqld:
- mysql80Compatible: vitess/lite:v17.0.2
+ mysql80Compatible: vitess/lite:v18.0.0
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
diff --git a/examples/operator/302_new_shards.yaml b/examples/operator/302_new_shards.yaml
index 87ba78de105..eefba85cc43 100644
--- a/examples/operator/302_new_shards.yaml
+++ b/examples/operator/302_new_shards.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:v17.0.2
- vtadmin: vitess/vtadmin:v17.0.2
- vtgate: vitess/lite:v17.0.2
- vttablet: vitess/lite:v17.0.2
- vtbackup: vitess/lite:v17.0.2
- vtorc: vitess/lite:v17.0.2
+ vtctld: vitess/lite:v18.0.0
+ vtadmin: vitess/vtadmin:v18.0.0
+ vtgate: vitess/lite:v18.0.0
+ vttablet: vitess/lite:v18.0.0
+ vtbackup: vitess/lite:v18.0.0
+ vtorc: vitess/lite:v18.0.0
mysqld:
- mysql80Compatible: vitess/lite:v17.0.2
+ mysql80Compatible: vitess/lite:v18.0.0
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
diff --git a/examples/operator/306_down_shard_0.yaml b/examples/operator/306_down_shard_0.yaml
index c9ef0c856e7..090752fc885 100644
--- a/examples/operator/306_down_shard_0.yaml
+++ b/examples/operator/306_down_shard_0.yaml
@@ -4,14 +4,14 @@ metadata:
name: example
spec:
images:
- vtctld: vitess/lite:v17.0.2
- vtadmin: vitess/vtadmin:v17.0.2
- vtgate: vitess/lite:v17.0.2
- vttablet: vitess/lite:v17.0.2
- vtbackup: vitess/lite:v17.0.2
- vtorc: vitess/lite:v17.0.2
+ vtctld: vitess/lite:v18.0.0
+ vtadmin: vitess/vtadmin:v18.0.0
+ vtgate: vitess/lite:v18.0.0
+ vttablet: vitess/lite:v18.0.0
+ vtbackup: vitess/lite:v18.0.0
+ vtorc: vitess/lite:v18.0.0
mysqld:
- mysql80Compatible: vitess/lite:v17.0.2
+ mysql80Compatible: vitess/lite:v18.0.0
mysqldExporter: prom/mysqld-exporter:v0.11.0
cells:
- name: zone1
diff --git a/examples/operator/README.md b/examples/operator/README.md
index de2e598b516..9182b25340c 100644
--- a/examples/operator/README.md
+++ b/examples/operator/README.md
@@ -26,9 +26,9 @@ kubectl apply -f 101_initial_cluster.yaml
# VTAdmin's UI will be available at http://localhost:14000/
./pf.sh &
alias mysql="mysql -h 127.0.0.1 -P 15306 -u user"
-alias vtctlclient="vtctlclient --server localhost:15999 --alsologtostderr"
-vtctlclient ApplySchema -- --sql="$(cat create_commerce_schema.sql)" commerce
-vtctlclient ApplyVSchema -- --vschema="$(cat vschema_commerce_initial.json)" commerce
+alias vtctldclient="vtctldclient --server localhost:15999 --alsologtostderr"
+vtctldclient ApplySchema --sql="$(cat create_commerce_schema.sql)" commerce
+vtctldclient ApplyVSchema --vschema="$(cat vschema_commerce_initial.json)" commerce
# Insert and verify data
mysql < ../common/insert_commerce_data.sql
@@ -38,37 +38,39 @@ mysql --table < ../common/select_commerce_data.sql
kubectl apply -f 201_customer_tablets.yaml
# Initiate move tables
-vtctlclient MoveTables -- --source commerce --tables 'customer,corder' Create customer.commerce2customer
+vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer create --source-keyspace commerce --tables "customer,corder"
# Validate
-vtctlclient VDiff customer.commerce2customer
+vtctldclient vdiff --workflow commerce2customer --target-keyspace customer create
+vtctldclient vdiff --workflow commerce2customer --target-keyspace customer show last
# Cut-over
-vtctlclient MoveTables -- --tablet_types=rdonly,replica SwitchTraffic customer.commerce2customer
-vtctlclient MoveTables -- --tablet_types=primary SwitchTraffic customer.commerce2customer
+vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer switchtraffic --tablet-types "rdonly,replica"
+vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer switchtraffic --tablet-types primary
# Clean-up
-vtctlclient MoveTables Complete customer.commerce2customer
+vtctldclient MoveTables --workflow commerce2customer --target-keyspace customer complete
# Prepare for resharding
-vtctlclient ApplySchema -- --sql="$(cat create_commerce_seq.sql)" commerce
-vtctlclient ApplyVSchema -- --vschema="$(cat vschema_commerce_seq.json)" commerce
-vtctlclient ApplySchema -- --sql="$(cat create_customer_sharded.sql)" customer
-vtctlclient ApplyVSchema -- --vschema="$(cat vschema_customer_sharded.json)" customer
+vtctldclient ApplySchema --sql="$(cat create_commerce_seq.sql)" commerce
+vtctldclient ApplyVSchema --vschema="$(cat vschema_commerce_seq.json)" commerce
+vtctldclient ApplySchema --sql="$(cat create_customer_sharded.sql)" customer
+vtctldclient ApplyVSchema --vschema="$(cat vschema_customer_sharded.json)" customer
kubectl apply -f 302_new_shards.yaml
# Reshard
-vtctlclient Reshard -- --source_shards '-' --target_shards '-80,80-' Create customer.cust2cust
+vtctldclient Reshard --workflow cust2cust --target-keyspace customer create --source-shards '-' --target-shards '-80,80-'
# Validate
-vtctlclient VDiff customer.cust2cust
+vtctldclient vdiff --workflow cust2cust --target-keyspace customer create
+vtctldclient vdiff --workflow cust2cust --target-keyspace customer show last
# Cut-over
-vtctlclient Reshard -- --tablet_types=rdonly,replica SwitchTraffic customer.cust2cust
-vtctlclient Reshard -- --tablet_types=primary SwitchTraffic customer.cust2cust
+vtctldclient Reshard --workflow cust2cust --target-keyspace customer switchtraffic --tablet-types "rdonly,replica"
+vtctldclient Reshard --workflow cust2cust --target-keyspace customer switchtraffic --tablet-types primary
# Down shard 0
-vtctlclient Reshard Complete customer.cust2cust
+vtctldclient Reshard --workflow cust2cust --target-keyspace customer complete
kubectl apply -f 306_down_shard_0.yaml
# Down cluster
diff --git a/examples/operator/operator.yaml b/examples/operator/operator.yaml
index 4c627daed22..3a2b0e66121 100644
--- a/examples/operator/operator.yaml
+++ b/examples/operator/operator.yaml
@@ -6145,7 +6145,7 @@ spec:
fieldPath: metadata.name
- name: OPERATOR_NAME
value: vitess-operator
- image: planetscale/vitess-operator:v2.10.2
+ image: planetscale/vitess-operator:v2.11.0
name: vitess-operator
resources:
limits:
diff --git a/examples/operator/pf.sh b/examples/operator/pf.sh
index 7d784ea2a33..5af7a429667 100755
--- a/examples/operator/pf.sh
+++ b/examples/operator/pf.sh
@@ -8,7 +8,6 @@ kubectl port-forward --address localhost "$(kubectl get service --selector="plan
process_id3=$!
sleep 2
echo "You may point your browser to http://localhost:15000, use the following aliases as shortcuts:"
-echo 'alias vtctlclient="vtctlclient --server=localhost:15999 --logtostderr"'
echo 'alias vtctldclient="vtctldclient --server=localhost:15999 --logtostderr"'
echo 'alias mysql="mysql -h 127.0.0.1 -P 15306 -u user"'
echo "Hit Ctrl-C to stop the port forwards"
diff --git a/examples/region_sharding/101_initial_cluster.sh b/examples/region_sharding/101_initial_cluster.sh
index c2692440189..6dd8989a32f 100755
--- a/examples/region_sharding/101_initial_cluster.sh
+++ b/examples/region_sharding/101_initial_cluster.sh
@@ -22,8 +22,6 @@ source ../common/env.sh
# start topo server
if [ "${TOPO}" = "zk2" ]; then
CELL=zone1 ../common/scripts/zk-up.sh
-elif [ "${TOPO}" = "k8s" ]; then
- CELL=zone1 ../common/scripts/k3s-up.sh
else
CELL=zone1 ../common/scripts/etcd-up.sh
fi
diff --git a/examples/region_sharding/201_main_sharded.sh b/examples/region_sharding/201_main_sharded.sh
index 387f89506db..cb0bb1ff823 100755
--- a/examples/region_sharding/201_main_sharded.sh
+++ b/examples/region_sharding/201_main_sharded.sh
@@ -20,14 +20,14 @@ source ../common/env.sh
vtctldclient ApplyVSchema --vschema-file main_vschema_sharded.json main || fail "Failed to apply vschema for the sharded main keyspace"
# optional: create the schema needed for lookup vindex
-#vtctlclient ApplySchema --sql-file create_lookup_schema.sql main
+#vtctldclient ApplySchema --sql-file create_lookup_schema.sql main
# create the lookup vindex
-vtctlclient CreateLookupVindex -- --tablet_types=PRIMARY main "$(cat lookup_vindex.json)" || fail "Failed to create lookup vindex in main keyspace"
+vtctldclient LookupVindex --name customer_region_lookup --table-keyspace main create --keyspace main --type consistent_lookup_unique --table-owner customer --table-owner-columns=id --tablet-types=PRIMARY || fail "Failed to create lookup vindex in main keyspace"
# we have to wait for replication to catch up
# Can see on vttablet status page Vreplication that copy is complete
sleep 5
# externalize vindex
-vtctlclient ExternalizeVindex main.customer_region_lookup || fail "Failed to externalize customer_region_lookup vindex in the main keyspace"
+vtctldclient LookupVindex --name customer_region_lookup --table-keyspace main externalize --keyspace main || fail "Failed to externalize customer_region_lookup vindex in the main keyspace"
diff --git a/examples/region_sharding/203_reshard.sh b/examples/region_sharding/203_reshard.sh
index aaa448a135d..753b5947623 100755
--- a/examples/region_sharding/203_reshard.sh
+++ b/examples/region_sharding/203_reshard.sh
@@ -16,4 +16,4 @@
source ../common/env.sh
-vtctlclient Reshard -- --source_shards '0' --target_shards '-40,40-80,80-c0,c0-' --tablet_types=PRIMARY Create main.main2regions
+vtctldclient reshard --workflow main2regions --target-keyspace main create --source-shards '0' --target-shards '-40,40-80,80-c0,c0-' --tablet-types=PRIMARY
diff --git a/examples/region_sharding/204_switch_reads.sh b/examples/region_sharding/204_switch_reads.sh
index 20703938199..570d5f60f9c 100755
--- a/examples/region_sharding/204_switch_reads.sh
+++ b/examples/region_sharding/204_switch_reads.sh
@@ -18,4 +18,4 @@
source ../common/env.sh
-vtctlclient Reshard -- --tablet_types=rdonly,replica SwitchTraffic main.main2regions
+vtctldclient reshard --workflow main2regions --target-keyspace main SwitchTraffic --tablet-types=rdonly,replica
diff --git a/examples/region_sharding/205_switch_writes.sh b/examples/region_sharding/205_switch_writes.sh
index ad0d8ee51d2..981aa016d56 100755
--- a/examples/region_sharding/205_switch_writes.sh
+++ b/examples/region_sharding/205_switch_writes.sh
@@ -18,7 +18,7 @@
source ../common/env.sh
-vtctlclient Reshard -- --tablet_types=primary SwitchTraffic main.main2regions
+vtctldclient reshard --workflow main2regions --target-keyspace main SwitchTraffic --tablet-types=primary
# to go back to unsharded
# call Reshard ReverseTraffic with all tablet types
diff --git a/examples/region_sharding/301_teardown.sh b/examples/region_sharding/301_teardown.sh
index 25f3bb259f2..ee86772a4f2 100755
--- a/examples/region_sharding/301_teardown.sh
+++ b/examples/region_sharding/301_teardown.sh
@@ -26,7 +26,7 @@ source ../common/env.sh
../common/scripts/vtgate-down.sh
for tablet in 100 200 300 400 500; do
- if vtctlclient --server localhost:15999 GetTablet zone1-$tablet >/dev/null 2>&1; then
+ if vtctldclient --server localhost:15999 GetTablet zone1-$tablet >/dev/null 2>&1; then
printf -v alias '%s-%010d' 'zone1' $tablet
echo "Shutting down tablet $alias"
CELL=zone1 TABLET_UID=$tablet ../common/scripts/vttablet-down.sh
@@ -38,8 +38,6 @@ done
if [ "${TOPO}" = "zk2" ]; then
CELL=zone1 ../common/scripts/zk-down.sh
-elif [ "${TOPO}" = "k8s" ]; then
- CELL=zone1 ../common/scripts/k3s-down.sh
else
CELL=zone1 ../common/scripts/etcd-down.sh
fi
diff --git a/go.mod b/go.mod
index e794bb447e8..23505f06ea4 100644
--- a/go.mod
+++ b/go.mod
@@ -1,6 +1,6 @@
module vitess.io/vitess
-go 1.20
+go 1.21
require (
cloud.google.com/go/storage v1.29.0
@@ -9,10 +9,9 @@ require (
github.com/Azure/azure-storage-blob-go v0.15.0
github.com/DataDog/datadog-go v4.8.3+incompatible
github.com/HdrHistogram/hdrhistogram-go v0.9.0 // indirect
- github.com/PuerkitoBio/goquery v1.5.1
github.com/aquarapid/vaultlib v0.5.1
github.com/armon/go-metrics v0.4.1 // indirect
- github.com/aws/aws-sdk-go v1.44.192
+ github.com/aws/aws-sdk-go v1.44.258
github.com/buger/jsonparser v1.1.1
github.com/cespare/xxhash/v2 v2.2.0
github.com/corpix/uarand v0.1.1 // indirect
@@ -21,7 +20,7 @@ require (
github.com/fsnotify/fsnotify v1.6.0
github.com/go-sql-driver/mysql v1.7.0
github.com/golang/glog v1.0.0
- github.com/golang/protobuf v1.5.2
+ github.com/golang/protobuf v1.5.3
github.com/golang/snappy v0.0.4
github.com/google/go-cmp v0.5.9
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
@@ -30,11 +29,10 @@ require (
github.com/gorilla/mux v1.8.0
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
- github.com/hashicorp/consul/api v1.18.0
+ github.com/hashicorp/consul/api v1.20.0
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/serf v0.10.1 // indirect
github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428
- github.com/imdario/mergo v0.3.13 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/klauspost/compress v1.16.5
github.com/klauspost/pgzip v1.2.5
@@ -52,16 +50,16 @@ require (
github.com/pires/go-proxyproto v0.6.2
github.com/pkg/errors v0.9.1
github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a
- github.com/planetscale/vtprotobuf v0.4.0
- github.com/prometheus/client_golang v1.14.0
- github.com/prometheus/common v0.39.0 // indirect
+ github.com/planetscale/vtprotobuf v0.5.0
+ github.com/prometheus/client_golang v1.15.1
+ github.com/prometheus/common v0.43.0 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
github.com/sjmudd/stopwatch v0.1.1
github.com/soheilhy/cmux v0.1.5
github.com/spf13/cobra v1.6.1
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.15.0
- github.com/stretchr/testify v1.8.1
+ github.com/stretchr/testify v1.8.2
github.com/tchap/go-patricia v2.3.0+incompatible
github.com/tidwall/gjson v1.12.1
github.com/tinylib/msgp v1.1.8 // indirect
@@ -69,148 +67,126 @@ require (
github.com/uber/jaeger-lib v2.4.1+incompatible // indirect
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82
github.com/z-division/go-zookeeper v1.0.0
- go.etcd.io/etcd/api/v3 v3.5.7
- go.etcd.io/etcd/client/pkg/v3 v3.5.7
- go.etcd.io/etcd/client/v3 v3.5.7
+ go.etcd.io/etcd/api/v3 v3.5.8
+ go.etcd.io/etcd/client/pkg/v3 v3.5.8
+ go.etcd.io/etcd/client/v3 v3.5.8
go.uber.org/mock v0.2.0
- golang.org/x/crypto v0.12.0 // indirect
+ golang.org/x/crypto v0.14.0 // indirect
golang.org/x/mod v0.12.0 // indirect
- golang.org/x/net v0.14.0
- golang.org/x/oauth2 v0.4.0
- golang.org/x/sys v0.11.0 // indirect
- golang.org/x/term v0.11.0
- golang.org/x/text v0.12.0
+ golang.org/x/net v0.17.0
+ golang.org/x/oauth2 v0.7.0
+ golang.org/x/sys v0.13.0
+ golang.org/x/term v0.13.0
+ golang.org/x/text v0.13.0
golang.org/x/time v0.3.0
- golang.org/x/tools v0.12.0
- google.golang.org/api v0.109.0
- google.golang.org/genproto v0.0.0-20230131230820-1c016267d619 // indirect
- google.golang.org/grpc v1.52.3
- google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0
+ golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846
+ google.golang.org/api v0.121.0
+ google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect
+ google.golang.org/grpc v1.55.0-dev
+ google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0
google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b
- google.golang.org/protobuf v1.28.1
- gopkg.in/DataDog/dd-trace-go.v1 v1.47.0
+ google.golang.org/protobuf v1.30.0
+ gopkg.in/DataDog/dd-trace-go.v1 v1.50.1
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect
- gopkg.in/gcfg.v1 v1.2.3
gopkg.in/ldap.v2 v2.5.1
- gopkg.in/warnings.v0 v0.1.2 // indirect
gotest.tools v2.2.0+incompatible
- k8s.io/apiextensions-apiserver v0.18.19
- k8s.io/apimachinery v0.26.1
- k8s.io/client-go v0.26.1
- k8s.io/code-generator v0.26.1
sigs.k8s.io/yaml v1.3.0
)
require (
github.com/Shopify/toxiproxy/v2 v2.5.0
github.com/bndr/gotabulate v1.1.2
- github.com/golang/mock v1.6.0
+ github.com/gammazero/deque v0.2.1
github.com/google/safehtml v0.1.0
github.com/hashicorp/go-version v1.6.0
github.com/kr/pretty v0.3.1
github.com/kr/text v0.2.0
github.com/mitchellh/mapstructure v1.5.0
github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249
+ github.com/spf13/afero v1.9.3
github.com/spf13/jwalterweatherman v1.1.0
github.com/xlab/treeprint v1.2.0
+ go.uber.org/goleak v1.2.1
golang.org/x/exp v0.0.0-20230131160201-f062dba9d201
golang.org/x/sync v0.3.0
- k8s.io/utils v0.0.0-20230115233650-391b47cb4029
modernc.org/sqlite v1.20.3
)
require (
- cloud.google.com/go v0.109.0 // indirect
- cloud.google.com/go/compute v1.18.0 // indirect
+ cloud.google.com/go v0.110.0 // indirect
+ cloud.google.com/go/compute v1.19.0 // indirect
cloud.google.com/go/compute/metadata v0.2.3 // indirect
- cloud.google.com/go/iam v0.10.0 // indirect
- github.com/DataDog/datadog-agent/pkg/obfuscate v0.42.0 // indirect
- github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0 // indirect
+ cloud.google.com/go/iam v0.13.0 // indirect
+ github.com/DataDog/appsec-internal-go v1.0.0 // indirect
+ github.com/DataDog/datadog-agent/pkg/obfuscate v0.43.1 // indirect
+ github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.45.0-rc.1 // indirect
github.com/DataDog/datadog-go/v5 v5.2.0 // indirect
+ github.com/DataDog/go-libddwaf v1.1.0 // indirect
github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork // indirect
github.com/DataDog/sketches-go v1.4.1 // indirect
github.com/Microsoft/go-winio v0.6.0 // indirect
- github.com/andybalholm/cascadia v1.1.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/coreos/go-semver v0.3.1 // indirect
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
- github.com/cyphar/filepath-securejoin v0.2.3 // indirect
+ github.com/cyphar/filepath-securejoin v0.2.4 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/dgraph-io/ristretto v0.1.1 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
- github.com/emicklei/go-restful/v3 v3.10.1 // indirect
- github.com/fatih/color v1.14.1 // indirect
+ github.com/fatih/color v1.15.0 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
- github.com/go-logr/logr v1.2.3 // indirect
- github.com/go-openapi/jsonpointer v0.19.6 // indirect
- github.com/go-openapi/jsonreference v0.20.2 // indirect
- github.com/go-openapi/swag v0.22.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/google/gnostic v0.6.9 // indirect
- github.com/google/gofuzz v1.2.0 // indirect
- github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect
- github.com/googleapis/gax-go/v2 v2.7.0 // indirect
+ github.com/google/btree v1.0.1 // indirect
+ github.com/google/s2a-go v0.1.3 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect
+ github.com/googleapis/gax-go/v2 v2.8.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
- github.com/hashicorp/go-hclog v1.4.0 // indirect
+ github.com/hashicorp/go-hclog v1.5.0 // indirect
github.com/hashicorp/go-rootcerts v1.0.2 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
- github.com/josharian/intern v1.0.0 // indirect
- github.com/json-iterator/go v1.1.12 // indirect
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect
- github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
- github.com/mattn/go-ieproxy v0.0.9 // indirect
- github.com/mattn/go-isatty v0.0.17 // indirect
+ github.com/mattn/go-ieproxy v0.0.10 // indirect
+ github.com/mattn/go-isatty v0.0.18 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
- github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
- github.com/modern-go/reflect2 v1.0.2 // indirect
- github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
- github.com/pelletier/go-toml/v2 v2.0.6 // indirect
+ github.com/onsi/gomega v1.23.0 // indirect
+ github.com/outcaste-io/ristretto v0.2.1 // indirect
+ github.com/pelletier/go-toml/v2 v2.0.7 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_model v0.3.0 // indirect
+ github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
- github.com/rivo/uniseg v0.4.3 // indirect
- github.com/rogpeppe/go-internal v1.9.0 // indirect
+ github.com/rivo/uniseg v0.4.4 // indirect
+ github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
- github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
- github.com/spf13/afero v1.9.3 // indirect
+ github.com/secure-systems-lab/go-securesystemslib v0.5.0 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/subosito/gotenv v1.4.2 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.0 // indirect
go.opencensus.io v0.24.0 // indirect
- go.uber.org/atomic v1.10.0 // indirect
- go.uber.org/multierr v1.9.0 // indirect
- go.uber.org/zap v1.23.0 // indirect
- go4.org/intern v0.0.0-20220617035311-6925f38cc365 // indirect
- go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 // indirect
+ go.uber.org/atomic v1.11.0 // indirect
+ go.uber.org/multierr v1.11.0 // indirect
+ go.uber.org/zap v1.24.0 // indirect
+ go4.org/intern v0.0.0-20230205224052-192e9f60865c // indirect
+ go4.org/unsafe/assume-no-moving-gc v0.0.0-20230426161633-7e06285ff160 // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/appengine v1.6.7 // indirect
- gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
inet.af/netaddr v0.0.0-20220811202034-502d2d690317 // indirect
- k8s.io/api v0.26.1 // indirect
- k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 // indirect
- k8s.io/klog/v2 v2.90.0 // indirect
- k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3 // indirect
lukechampine.com/uint128 v1.2.0 // indirect
modernc.org/cc/v3 v3.40.0 // indirect
modernc.org/ccgo/v3 v3.16.13 // indirect
- modernc.org/libc v1.22.2 // indirect
+ modernc.org/libc v1.22.5 // indirect
modernc.org/mathutil v1.5.0 // indirect
modernc.org/memory v1.5.0 // indirect
modernc.org/opt v0.1.3 // indirect
modernc.org/strutil v1.1.3 // indirect
modernc.org/token v1.1.0 // indirect
- sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
- sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
)
diff --git a/go.sum b/go.sum
index 2edff808e6c..baeeda160ff 100644
--- a/go.sum
+++ b/go.sum
@@ -17,23 +17,24 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
-cloud.google.com/go v0.109.0 h1:38CZoKGlCnPZjGdyj0ZfpoGae0/wgNfy5F0byyxg0Gk=
-cloud.google.com/go v0.109.0/go.mod h1:2sYycXt75t/CSB5R9M2wPU1tJmire7AQZTPtITcGBVE=
+cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys=
+cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY=
-cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
+cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ=
+cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
-cloud.google.com/go/iam v0.10.0 h1:fpP/gByFs6US1ma53v7VxhvbJpO2Aapng6wabJ99MuI=
-cloud.google.com/go/iam v0.10.0/go.mod h1:nXAECrMt2qHpF6RZUZseteD6QyanL68reN4OXPw0UWM=
-cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs=
+cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k=
+cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0=
+cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM=
+cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -53,40 +54,37 @@ github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVt
github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k=
github.com/Azure/azure-storage-blob-go v0.15.0 h1:rXtgp8tN1p29GvpGgfJetavIG0V7OgcSXPpwp3tx6qk=
github.com/Azure/azure-storage-blob-go v0.15.0/go.mod h1:vbjsVbX0dlxnRc4FFMPsS9BsJWPcne7GB7onqlPvz58=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
-github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
github.com/Azure/go-autorest/autorest/adal v0.9.13 h1:Mp5hbtOePIzM8pJVRa3YLrWWmZtoxRXqUEzCfJt3+/Q=
github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
-github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
-github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
-github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
-github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg=
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
-github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
-github.com/DataDog/datadog-agent/pkg/obfuscate v0.42.0 h1:p9uCmbyi4gEbJAOLoT/GjIAQMGe3velLmiC3mMgSIy4=
-github.com/DataDog/datadog-agent/pkg/obfuscate v0.42.0/go.mod h1:7Bsrm5U8/B+B8dffT3t733tDvdCr7upqIPSVuDqJ0Mw=
-github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0 h1:b/RFr5T6HcEOKoXfKFOqZf33hsUbvskY1F5LDld7HCI=
-github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.42.0/go.mod h1:VVMDDibJxYEkwcLdZBT2g8EHKpbMT4JdOhRbQ9GdjbM=
+github.com/DataDog/appsec-internal-go v1.0.0 h1:2u5IkF4DBj3KVeQn5Vg2vjPUtt513zxEYglcqnd500U=
+github.com/DataDog/appsec-internal-go v1.0.0/go.mod h1:+Y+4klVWKPOnZx6XESG7QHydOaUGEXyH2j/vSg9JiNM=
+github.com/DataDog/datadog-agent/pkg/obfuscate v0.43.1 h1:HG4dOM6Ou+zZsaKC++4kpM9VGJ/TYo9X61LPz2mmjDE=
+github.com/DataDog/datadog-agent/pkg/obfuscate v0.43.1/go.mod h1:o+rJy3B2o+Zb+wCgLSkMlkD7EiUEA5Q63cid53fZkQY=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.45.0-rc.1 h1:0OK84DbAucLUwoDYoBFve1cuhDWtoquruVVDjgucYlI=
+github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.45.0-rc.1/go.mod h1:VVMDDibJxYEkwcLdZBT2g8EHKpbMT4JdOhRbQ9GdjbM=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go v4.8.3+incompatible h1:fNGaYSuObuQb5nzeTQqowRAd9bpDIRRV4/gUtIBjh8Q=
github.com/DataDog/datadog-go v4.8.3+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go/v5 v5.1.1/go.mod h1:KhiYb2Badlv9/rofz+OznKoEF5XKTonWyhx5K83AP8E=
github.com/DataDog/datadog-go/v5 v5.2.0 h1:kSptqUGSNK67DgA+By3rwtFnAh6pTBxJ7Hn8JCLZcKY=
github.com/DataDog/datadog-go/v5 v5.2.0/go.mod h1:XRDJk1pTc00gm+ZDiBKsjh7oOOtJfYfglVCmFb8C2+Q=
+github.com/DataDog/go-libddwaf v1.1.0 h1:PhlI/31yxu88JEgTYqxffhd8oM4KQMfNWUVyICqIDMY=
+github.com/DataDog/go-libddwaf v1.1.0/go.mod h1:DI5y8obPajk+Tvy2o+nZc2g/5Ria/Rfq5/624k7pHpE=
github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork h1:yBq5PrAtrM4yVeSzQ+bn050+Ysp++RKF1QmtkL4VqvU=
github.com/DataDog/go-tuf v0.3.0--fix-localmeta-fork/go.mod h1:yA5JwkZsHTLuqq3zaRgUQf35DfDkpOZqgtBqHKpwrBs=
+github.com/DataDog/gostackparse v0.5.0 h1:jb72P6GFHPHz2W0onsN51cS3FkaMDcjb0QzgxxA4gDk=
+github.com/DataDog/gostackparse v0.5.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM=
github.com/DataDog/sketches-go v1.4.1 h1:j5G6as+9FASM2qC36lvpvQAj9qsv/jUs3FtO8CwZNAY=
github.com/DataDog/sketches-go v1.4.1/go.mod h1:xJIXldczJyyjnbDop7ZZcLxJdV3+7Kra7H1KMgpgkLk=
github.com/HdrHistogram/hdrhistogram-go v0.9.0 h1:dpujRju0R4M/QZzcnR1LH1qm+TVG3UzkWdp5tH1WMcg=
@@ -98,40 +96,23 @@ github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpz
github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
-github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
-github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/PuerkitoBio/goquery v1.5.1 h1:PSPBGne8NIUWw+/7vFBV+kG2J/5MOjbzc7154OaKCSE=
-github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc=
-github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/Shopify/toxiproxy/v2 v2.5.0 h1:i4LPT+qrSlKNtQf5QliVjdP08GyAH8+BUIc9gT0eahc=
github.com/Shopify/toxiproxy/v2 v2.5.0/go.mod h1:yhM2epWtAmel9CB8r2+L+PCmhH6yH2pITaPAo7jxJl0=
-github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
-github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
-github.com/andybalholm/cascadia v1.1.0 h1:BuuO6sSfQNFRu1LppgbD25Hr2vLYW25JvxHs5zzsLTo=
-github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/aquarapid/vaultlib v0.5.1 h1:vuLWR6bZzLHybjJBSUYPgZlIp6KZ+SXeHLRRYTuk6d4=
github.com/aquarapid/vaultlib v0.5.1/go.mod h1:yT7AlEXtuabkxylOc/+Ulyp18tff1+QjgNLTnFWTlOs=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
-github.com/aws/aws-sdk-go v1.44.192 h1:KL54vCxRd5v5XBGjnF3FelzXXwl+aWHDmDTihFmRNgM=
-github.com/aws/aws-sdk-go v1.44.192/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
+github.com/aws/aws-sdk-go v1.44.258 h1:JVk1lgpsTnb1kvUw3eGhPLcTpEBp6HeSf1fxcYDs2Ho=
+github.com/aws/aws-sdk-go v1.44.258/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -139,13 +120,11 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/bndr/gotabulate v1.1.2 h1:yC9izuZEphojb9r+KYL4W9IJKO/ceIO8HDwxMA24U4c=
github.com/bndr/gotabulate v1.1.2/go.mod h1:0+8yUgaPTtLRTjf49E8oju7ojpU11YmXyvq1LbPAb3U=
github.com/buger/jsonparser v1.1.1 h1:2PnMjfWD7wBILjqQbt530v576A/cAbQvEW9gGIpYMUs=
github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -159,91 +138,62 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
-github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA=
-github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
-github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
-github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
-github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
-github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
-github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
-github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
-github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/corpix/uarand v0.1.1 h1:RMr1TWc9F4n5jiPDzFHtmaUXLKLNUFK0SgCLo4BhX/U=
github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU=
-github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
-github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
-github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
+github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
+github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/dave/jennifer v1.6.0 h1:MQ/6emI2xM7wt0tJzJzyUik2Q3Tcn2eE0vtYgh4GPVI=
github.com/dave/jennifer v1.6.0/go.mod h1:AxTG893FiZKqxy3FP1kL80VMshSMuz2G+EgvszgGRnk=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug=
-github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8=
-github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA=
-github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
-github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
-github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
-github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/dvyukov/go-fuzz v0.0.0-20210103155950-6a8e9d1f2415/go.mod h1:11Gm+ccJnvAhCNLlf5+cS9KjtbaD5I5zaZpFMsTHWTw=
-github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
-github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
-github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ=
-github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
-github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
-github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w=
-github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg=
+github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
+github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk=
github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
-github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0=
github.com/flynn/go-docopt v0.0.0-20140912013429-f6dd2ebbb31e/go.mod h1:HyVoz1Mz5Co8TFO8EupIdlcpwShBmY98dkT2xeHkvEI=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible h1:TcekIExNqud5crz4xD2pavyTgWiPvpYe4Xau31I0PRk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE=
+github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
-github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/gammazero/deque v0.2.1 h1:qSdsbG6pgp6nL7A0+K/B7s12mcCY/5l5SIUpMOl+dC0=
+github.com/gammazero/deque v0.2.1/go.mod h1:LFroj8x4cMYCukHJDbxFCkT+r9AndaJnFMuZDV34tuU=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
-github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
-github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -251,74 +201,17 @@ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
-github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
-github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
-github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
-github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
-github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
-github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
-github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
-github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
-github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
-github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
-github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
-github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
-github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
-github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
-github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
-github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
-github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
-github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
-github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
-github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
-github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
-github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
-github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
-github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
-github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
-github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
-github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
-github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
-github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
-github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
-github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
-github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
-github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
-github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
-github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
-github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
-github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
-github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
-github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
-github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
-github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
-github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
-github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
-github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
-github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc=
github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
-github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -331,9 +224,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
-github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
-github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -349,15 +240,15 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
+github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
-github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0=
-github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E=
+github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -369,19 +260,17 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
-github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ=
-github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
+github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw=
+github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
@@ -394,56 +283,48 @@ github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ=
+github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
+github.com/google/s2a-go v0.1.3 h1:FAgZmpLl/SXurPEZyCMPBIiiYeTbqfjlbdnCNTAkbGE=
+github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A=
github.com/google/safehtml v0.1.0 h1:EwLKo8qawTKfsi0orxcQAZzu07cICaBeFMegAU9eaT8=
github.com/google/safehtml v0.1.0/go.mod h1:L4KWwDsUJdECRAEpZoBn3O64bQaywRscowZjJAzjHnU=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
-github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg=
-github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k=
+github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
-github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ=
-github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8=
-github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
-github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gax-go/v2 v2.8.0 h1:UBtEZqx1bjXtOQ5BVTkuYghXrr3N4V123VKJK67vJZc=
+github.com/googleapis/gax-go/v2 v2.8.0/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
-github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
-github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
-github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
-github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
-github.com/hashicorp/consul/api v1.18.0 h1:R7PPNzTCeN6VuQNDwwhZWJvzCtGSrNpJqfb22h3yH9g=
-github.com/hashicorp/consul/api v1.18.0/go.mod h1:owRRGJ9M5xReDC5nfT8FTJrNAPbT4NM6p/k+d03q2v4=
-github.com/hashicorp/consul/sdk v0.13.0 h1:lce3nFlpv8humJL8rNrrGHYSKc3q+Kxfeg3Ii1m6ZWU=
-github.com/hashicorp/consul/sdk v0.13.0/go.mod h1:0hs/l5fOVhJy/VdcoaNqUSi2AUs95eF5WKtv+EYIQqE=
+github.com/hashicorp/consul/api v1.20.0 h1:9IHTjNVSZ7MIwjlW3N3a7iGiykCMDpxZu8jsxFJh0yc=
+github.com/hashicorp/consul/api v1.20.0/go.mod h1:nR64eD44KQ59Of/ECwt2vUmIK2DKsDzAwTmwmLl8Wpo=
+github.com/hashicorp/consul/sdk v0.13.1 h1:EygWVWWMczTzXGpO93awkHFzfUka6hLYJ0qhETd+6lY=
+github.com/hashicorp/consul/sdk v0.13.1/go.mod h1:SW/mM4LbKfqmMvcFu8v+eiQQ7oitXEFeiBe9StxERb0=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
-github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
-github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I=
-github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
+github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
+github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
@@ -452,6 +333,7 @@ github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iP
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
@@ -463,7 +345,6 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.2.1/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@@ -483,10 +364,6 @@ github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428 h1:Mo9W14pwbO9VfRe+ygqZ8dFbPpoIK1HFrG/zjTuQ+nc=
github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo=
-github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
-github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
-github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg=
-github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
@@ -494,24 +371,14 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
-github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
-github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
-github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
-github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
-github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.16.5 h1:IFV2oUNUzZaz+XyusxpLzpzS8Pt5rh0Z16For/djlyI=
@@ -522,28 +389,17 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/krishicks/yaml-patch v0.0.10 h1:H4FcHpnNwVmw8u0MjPRjWyIXtco6zM2F78t+57oNM3E=
github.com/krishicks/yaml-patch v0.0.10/go.mod h1:Sm5TchwZS6sm7RJoyg87tzxm2ZcKzdRE4Q7TjNhPrME=
-github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
-github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
-github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
-github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
-github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
@@ -552,19 +408,16 @@ github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E=
-github.com/mattn/go-ieproxy v0.0.9 h1:RvVbLiMv/Hbjf1gRaC2AQyzwbdVhdId7D2vPnXIml4k=
-github.com/mattn/go-ieproxy v0.0.9/go.mod h1:eF30/rfdQUO9EnzNIZQr0r9HiLMlZNCpJkHbmMuOAE0=
+github.com/mattn/go-ieproxy v0.0.10 h1:P+2QihaKCLgbs/32dhFLbxXlqsy8tIG1LUXHIoPaQPo=
+github.com/mattn/go-ieproxy v0.0.10/go.mod h1:/NsJd+kxZBmjMc5hrJCKMbP57B84rvq9BiDRbtO9AS0=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
-github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
-github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
+github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
+github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
@@ -578,72 +431,54 @@ github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1 h1:jw16EimP5oAEM/2wt+SiEUov/YDyTCTDuPtIKgQIvk0=
github.com/minio/minio-go v0.0.0-20190131015406-c8a261de75c1/go.mod h1:vuvdOZLJuf5HmJAJrKV64MmozrSsk+or0PB5dzdfspg=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
-github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
-github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
-github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/montanaflynn/stats v0.7.0 h1:r3y12KyNxj/Sb/iOE46ws+3mS1+MZca1wlHQFPsY/JU=
github.com/montanaflynn/stats v0.7.0/go.mod h1:etXPPgVO6n31NxCd9KQUMvCM+ve0ruNzt6R8Bnaayow=
-github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
-github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249 h1:NHrXEjTNQY7P0Zfx1aMrNhpgxHmow66XQtm0aQLY0AE=
github.com/nsf/jsondiff v0.0.0-20210926074059-1e845ec5d249/go.mod h1:mpRZBD8SJ55OIICQ3iWH0Yz3cjzA61JdqMLoWXeB2+8=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
-github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
-github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
-github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
-github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs=
-github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys=
+github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg=
github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
+github.com/outcaste-io/ristretto v0.2.0/go.mod h1:iBZA7RCt6jaOr0z6hiBQ6t662/oZ6Gx/yauuPvIWHAI=
+github.com/outcaste-io/ristretto v0.2.1 h1:KCItuNIGJZcursqHr3ghO7fc5ddZLEHspL9UR0cQM64=
+github.com/outcaste-io/ristretto v0.2.1/go.mod h1:W8HywhmtlopSB1jeMg3JtdIhf+DYkLAr0VN/s4+MHac=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
-github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
-github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml/v2 v2.0.6 h1:nrzqCb7j9cDFj2coyLNLaZuJTLjWjlaz6nvTvIwycIU=
-github.com/pelletier/go-toml/v2 v2.0.6/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
-github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pelletier/go-toml/v2 v2.0.7 h1:muncTPStnKRos5dpVKULv2FVd4bMOhNePj9CjgDb8Us=
+github.com/pelletier/go-toml/v2 v2.0.7/go.mod h1:eumQOmlWiOPt5WriQQqoM5y18pDHwha2N+QD+EUNTek=
github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
@@ -658,28 +493,27 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a h1:y0OpQ4+5tKxeh9+H+2cVgASl9yMZYV9CILinKOiKafA=
github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a/go.mod h1:GJFUzQuXIoB2Kjn1ZfDhJr/42D5nWOqRcIQVgCxTuIE=
-github.com/planetscale/vtprotobuf v0.4.0 h1:NEI+g4woRaAZgeZ3sAvbtyvMBRjIv5kE7EWYQ8m4JwY=
-github.com/planetscale/vtprotobuf v0.4.0/go.mod h1:wm1N3qk9G/4+VM1WhpkLbvY/d8+0PbwYYpP5P5VhTks=
+github.com/planetscale/vtprotobuf v0.5.0 h1:l8PXm6Colok5z6qQLNhAj2Jq5BfoMTIHxLER5a6nDqM=
+github.com/planetscale/vtprotobuf v0.5.0/go.mod h1:wm1N3qk9G/4+VM1WhpkLbvY/d8+0PbwYYpP5P5VhTks=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
-github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
-github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
-github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
+github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
+github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
-github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
+github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
+github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
-github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
-github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
+github.com/prometheus/common v0.43.0 h1:iq+BVjvYLei5f27wiuNiB1DN6DYQkp1c8Bx0Vykh5us=
+github.com/prometheus/common v0.43.0/go.mod h1:NCvr5cQIh3Y/gy73/RdVtC9r8xxrxwJnB+2lB3BxrFc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
@@ -690,25 +524,24 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqn
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 h1:Qp27Idfgi6ACvFQat5+VJvlYToylpM/hcyLBI3WaKPA=
+github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052/go.mod h1:uvX/8buq8uVeiZiFht+0lqSLBHF+uGV8BrTv8W/SIwk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
-github.com/rivo/uniseg v0.4.3 h1:utMvzDsuh3suAEnhH0RdHmoPbU648o6CvXxTx4SBMOw=
-github.com/rivo/uniseg v0.4.3/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
-github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
+github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
-github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/secure-systems-lab/go-securesystemslib v0.3.1/go.mod h1:o8hhjkbNl2gOamKUA/eNW3xUrntHT9L4W89W1nfj43U=
-github.com/secure-systems-lab/go-securesystemslib v0.4.0 h1:b23VGrQhTA8cN2CbBw7/FulN9fTtqYUdS5+Oxzt+DUE=
-github.com/secure-systems-lab/go-securesystemslib v0.4.0/go.mod h1:FGBZgq2tXWICsxWQW1msNf49F0Pf2Op5Htayx335Qbs=
-github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
+github.com/secure-systems-lab/go-securesystemslib v0.5.0 h1:oTiNu0QnulMQgN/hLK124wJD/r2f9ZhIUuKIeBsCBT8=
+github.com/secure-systems-lab/go-securesystemslib v0.5.0/go.mod h1:uoCqUC0Ap7jrBSEanxT+SdACYJTVplRXWLkGMuDjXqk=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
@@ -716,36 +549,24 @@ github.com/sjmudd/stopwatch v0.1.1 h1:x45OvxFB5OtCkjvYtzRF5fWB857Jzjjk84Oyd5C5eb
github.com/sjmudd/stopwatch v0.1.1/go.mod h1:BLw0oIQJ1YLXBO/q9ufK/SgnKBVIkC2qrm6uy78Zw6U=
github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s=
-github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
-github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
-github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
-github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
+github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
+github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.9.3 h1:41FoI0fD7OR7mGcKE/aOiLkGreyf8ifIOQmJANWogMk=
github.com/spf13/afero v1.9.3/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y=
-github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=
github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU=
-github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
-github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA=
github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY=
-github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
-github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
github.com/spf13/viper v1.15.0 h1:js3yy885G8xwJa6iOISGFwd+qlUo5AvyXb7CiihdtiU=
github.com/spf13/viper v1.15.0/go.mod h1:fFcTBJxvhhzSJiZy8n+PeW6t8l+KeT/uTARa0jHOQLA=
-github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -758,8 +579,9 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
@@ -769,27 +591,17 @@ github.com/tidwall/gjson v1.12.1 h1:ikuZsLdhr8Ws0IdROXUS1Gi4v9Z4pGqpX/CvJkxvfpo=
github.com/tidwall/gjson v1.12.1/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
-github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0=
github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw=
-github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
-github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
-github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
-github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
-github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
-github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
-github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0=
-github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82 h1:BHyfKlQyqbsFN5p3IfnEUduWvb9is428/nNb5L3U01M=
github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -800,17 +612,12 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/z-division/go-zookeeper v1.0.0 h1:ULsCj0nP6+U1liDFWe+2oEF6o4amixoDcDlwEUghVUY=
github.com/z-division/go-zookeeper v1.0.0/go.mod h1:6X4UioQXpvyezJJl4J9NHAJKsoffCwy5wCaaTktXjOA=
-go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
-go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY=
-go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA=
-go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg=
-go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY=
-go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4=
-go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw=
-go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
-go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
+go.etcd.io/etcd/api/v3 v3.5.8 h1:Zf44zJszoU7zRV0X/nStPenegNXoFDWcB/MwrJbA+L4=
+go.etcd.io/etcd/api/v3 v3.5.8/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k=
+go.etcd.io/etcd/client/pkg/v3 v3.5.8 h1:tPp9YRn/UBFAHdhOQUII9eUs7aOK35eulpMhX4YBd+M=
+go.etcd.io/etcd/client/pkg/v3 v3.5.8/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4=
+go.etcd.io/etcd/client/v3 v3.5.8 h1:B6ngTKZSWWowHEoaucOKHQR/AtZKaoHLiUpWxOLG4l4=
+go.etcd.io/etcd/client/v3 v3.5.8/go.mod h1:idZYIPVkttBJBiRigkB5EM0MmEyx8jcl18zCV3F5noc=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
@@ -820,36 +627,34 @@ go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
-go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
-go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
-go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
+go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
+go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
+go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
+go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4=
go.uber.org/mock v0.2.0 h1:TaP3xedm7JaAgScZO7tlvlKrqT0p7I6OsdGB5YNSMDU=
go.uber.org/mock v0.2.0/go.mod h1:J0y0rp9L3xiff1+ZBfKxlC1fz2+aO16tw0tsDOixfuM=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
-go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
-go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
+go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
+go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY=
-go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY=
+go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
+go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
go4.org/intern v0.0.0-20211027215823-ae77deb06f29/go.mod h1:cS2ma+47FKrLPdXFpr7CuxiTW3eyJbWew4qx0qtQWDA=
-go4.org/intern v0.0.0-20220617035311-6925f38cc365 h1:t9hFvR102YlOqU0fQn1wgwhNvSbHGBbbJxX9JKfU3l0=
-go4.org/intern v0.0.0-20220617035311-6925f38cc365/go.mod h1:WXRv3p7T6gzt0CcJm43AAKdKVZmcQbwwC7EwquU5BZU=
+go4.org/intern v0.0.0-20230205224052-192e9f60865c h1:b8WZ7Ja8nKegYxfwDLLwT00ZKv4lXAQrw8LYPK+cHSI=
+go4.org/intern v0.0.0-20230205224052-192e9f60865c/go.mod h1:RJ0SVrOMpxLhgb5noIV+09zI1RsRlMsbUcSxpWHqbrE=
go4.org/unsafe/assume-no-moving-gc v0.0.0-20211027215541-db492cf91b37/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=
-go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760 h1:FyBZqvoA/jbNzuAWLQE2kG820zMAkcilx6BMjGbL/E4=
go4.org/unsafe/assume-no-moving-gc v0.0.0-20220617031537-928513b29760/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=
+go4.org/unsafe/assume-no-moving-gc v0.0.0-20230204201903-c31fa085b70e/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=
+go4.org/unsafe/assume-no-moving-gc v0.0.0-20230426161633-7e06285ff160 h1:LrTREdITdNDW/JRlUuG3fhXvCK3ZcKXTCf1BbxE8sT4=
+go4.org/unsafe/assume-no-moving-gc v0.0.0-20230426161633-7e06285ff160/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190128193316-c7b33c32a30b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -859,10 +664,9 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
-golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
-golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
-golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
+golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
+golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -888,7 +692,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
@@ -901,23 +704,17 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
-golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -926,8 +723,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -958,18 +753,13 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210610132358-84b48f89b13b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
-golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
-golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
-golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
+golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
+golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -979,8 +769,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
-golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
+golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g=
+golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -993,39 +783,30 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
-golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1049,7 +830,6 @@ golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1066,56 +846,44 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220627191245-f75cf1eec38b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
-golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
+golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
-golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
-golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0=
-golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
-golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
+golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
+golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
-golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
-golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
-golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
+golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
-golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@@ -1124,8 +892,6 @@ golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBn
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1151,7 +917,6 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -1171,10 +936,8 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
-golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4=
-golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
-golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss=
-golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
+golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846 h1:Vve/L0v7CXXuxUmaMGIEK/dEeq7uiqb5qBgQrZzIE7E=
+golang.org/x/tools v0.12.1-0.20230815132531-74c255bcf846/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1200,8 +963,8 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
-google.golang.org/api v0.109.0 h1:sW9hgHyX497PP5//NUM7nqfV8D0iDfBApqq7sOh1XR8=
-google.golang.org/api v0.109.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY=
+google.golang.org/api v0.121.0 h1:8Oopoo8Vavxx6gt+sgs8s8/X60WBAtKQq6JqnkF+xow=
+google.golang.org/api v0.121.0/go.mod h1:gcitW0lvnyWjSp9nKxAbdHKIZ6vF4aajGueeslZOyms=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1249,9 +1012,8 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20230131230820-1c016267d619 h1:p0kMzw6AG0JEzd7Z+kXqOiLhC6gjUQTbtS2zR0Q3DbI=
-google.golang.org/genproto v0.0.0-20230131230820-1c016267d619/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A=
+google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -1271,11 +1033,11 @@ google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
-google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
-google.golang.org/grpc v1.52.3 h1:pf7sOysg4LdgBqduXveGKrcEwbStiK2rtfghdzlUYDQ=
-google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY=
-google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0 h1:TLkBREm4nIsEcexnCjgQd5GQWaHcqMzwQV0TX9pq8S0=
-google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.2.0/go.mod h1:DNq5QpG7LJqD2AamLZ7zvKE0DEpVl2BSEVjFycAAjRY=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
+google.golang.org/grpc v1.55.0-dev h1:b3WG8LoyS+X/C5ZbIWsJGjt8Hhqq0wUVX8+rPF/BHZo=
+google.golang.org/grpc v1.55.0-dev/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0 h1:rNBFJjBCOgVr9pWD7rs/knKL4FRTKgpZmsRfV214zcA=
+google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.3.0/go.mod h1:Dk1tviKTvMCz5tvh7t+fh94dhmQVHuCt2OzJB3CTW9Y=
google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b h1:D/GTYPo6I1oEo08Bfpuj3xl5XE+UGHj7//5fVyKxhsQ=
google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
@@ -1290,12 +1052,11 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
-google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
-gopkg.in/DataDog/dd-trace-go.v1 v1.47.0 h1:w3mHEgOR1o52mkyCbkTM+El8DG732+Fnug4FAGhIpsk=
-gopkg.in/DataDog/dd-trace-go.v1 v1.47.0/go.mod h1:aHb6c4hPRANXnB64LDAKyfWotKgfRjlHv23MnahM8AI=
+google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
+google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
+gopkg.in/DataDog/dd-trace-go.v1 v1.50.1 h1:DUpHhh+MHtpYnUyGr5rpfvKUXkRg93TSEHii/LZVF6g=
+gopkg.in/DataDog/dd-trace-go.v1 v1.50.1/go.mod h1:sw4gV8LIXseC5ISMbDJmm79OJDdl8I2Hhtelb6lpHuQ=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
@@ -1304,26 +1065,15 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
-gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs=
-gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
-gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
-gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ldap.v2 v2.5.1 h1:wiu0okdNfjlBzg6UWvd1Hn8Y+Ux17/u/4nlk4CQr6tU=
gopkg.in/ldap.v2 v2.5.1/go.mod h1:oI0cpe/D7HRtBQl8aTg+ZmzFUAvu4lsv3eLXMLGFxWk=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
-gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
-gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
-gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
-gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
-gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -1334,8 +1084,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
@@ -1349,38 +1097,6 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
inet.af/netaddr v0.0.0-20220811202034-502d2d690317 h1:U2fwK6P2EqmopP/hFLTOAjWTki0qgd4GMJn5X8wOleU=
inet.af/netaddr v0.0.0-20220811202034-502d2d690317/go.mod h1:OIezDfdzOgFhuw4HuWapWq2e9l0H9tK4F1j+ETRtF3k=
-k8s.io/api v0.18.19/go.mod h1:lmViaHqL3es8JiaK3pCJMjBKm2CnzIcAXpHKifwbmAg=
-k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ=
-k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg=
-k8s.io/apiextensions-apiserver v0.18.19 h1:z7tzzrsODC0cqvp3Pcy2HHc6wOnaSQQEWn0l/jbrJ6c=
-k8s.io/apiextensions-apiserver v0.18.19/go.mod h1:kiomVdryKCrn+R0E+iPx+bZ/00rgj5tPXEBduSEJwgI=
-k8s.io/apimachinery v0.18.19/go.mod h1:70HIRzSveORLKbatTlXzI2B2UUhbWzbq8Vqyf+HbdUQ=
-k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ=
-k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74=
-k8s.io/apiserver v0.18.19/go.mod h1:VY80gRUh89Cmnx2s9S5nZTF8vwzEKweAFy7nTFuFLRU=
-k8s.io/client-go v0.18.19/go.mod h1:lB+d4UqdzSjaU41VODLYm/oon3o05LAzsVpm6Me5XkY=
-k8s.io/client-go v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU=
-k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE=
-k8s.io/code-generator v0.18.19/go.mod h1:l5yJd8cLSvkIb0ZJMsQdWuDOx5rWfLNpgmHQyl3LmBE=
-k8s.io/code-generator v0.26.1 h1:dusFDsnNSKlMFYhzIM0jAO1OlnTN5WYwQQ+Ai12IIlo=
-k8s.io/code-generator v0.26.1/go.mod h1:OMoJ5Dqx1wgaQzKgc+ZWaZPfGjdRq/Y3WubFrZmeI3I=
-k8s.io/component-base v0.18.19/go.mod h1:nQMCdH6RaS/GD0J1YZqc5NInfCdknth4BwlAT5Mf7tA=
-k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9 h1:iu3o/SxaHVI7tKPtkGzD3M9IzrE21j+CUKH98NQJ8Ms=
-k8s.io/gengo v0.0.0-20221011193443-fad74ee6edd9/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
-k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
-k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
-k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
-k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
-k8s.io/klog/v2 v2.90.0 h1:VkTxIV/FjRXn1fgNNcKGM8cfmL1Z33ZjXRTVxKCoF5M=
-k8s.io/klog/v2 v2.90.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
-k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3 h1:vV3ZKAUX0nMjTflyfVea98dTfROpIxDaEsQws0FT2Ts=
-k8s.io/kube-openapi v0.0.0-20230202010329-39b3636cbaa3/go.mod h1:/BYxry62FuDzmI+i9B+X2pqfySRmSOW2ARmj5Zbqhj0=
-k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
-k8s.io/utils v0.0.0-20230115233650-391b47cb4029 h1:L8zDtT4jrxj+TaQYD0k8KNlr556WaVQylDXswKmX+dE=
-k8s.io/utils v0.0.0-20230115233650-391b47cb4029/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
lukechampine.com/uint128 v1.2.0 h1:mBi/5l91vocEN8otkC5bDLhi2KdCticRiwbdB0O+rjI=
lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
modernc.org/cc/v3 v3.40.0 h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw=
@@ -1388,9 +1104,11 @@ modernc.org/cc/v3 v3.40.0/go.mod h1:/bTg4dnWkSXowUO6ssQKnOV0yMVxDYNIsIrzqTFDGH0=
modernc.org/ccgo/v3 v3.16.13 h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw=
modernc.org/ccgo/v3 v3.16.13/go.mod h1:2Quk+5YgpImhPjv2Qsob1DnZ/4som1lJTodubIcoUkY=
modernc.org/ccorpus v1.11.6 h1:J16RXiiqiCgua6+ZvQot4yUuUy8zxgqbqEEUuGPlISk=
+modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ=
modernc.org/httpfs v1.0.6 h1:AAgIpFZRXuYnkjftxTAZwMIiwEqAfk8aVB2/oA6nAeM=
-modernc.org/libc v1.22.2 h1:4U7v51GyhlWqQmwCHj28Rdq2Yzwk55ovjFrdPjs8Hb0=
-modernc.org/libc v1.22.2/go.mod h1:uvQavJ1pZ0hIoC/jfqNoMLURIMhKzINIWypNM17puug=
+modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM=
+modernc.org/libc v1.22.5 h1:91BNch/e5B0uPbJFgqbxXuOnxBQjlS//icfQEGmvyjE=
+modernc.org/libc v1.22.5/go.mod h1:jj+Z7dTNX8fBScMVNRAYZ/jF91K8fdT2hYMThc3YjBY=
modernc.org/mathutil v1.5.0 h1:rV0Ko/6SfM+8G+yKiyI830l3Wuz1zRutdslNoQ0kfiQ=
modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
modernc.org/memory v1.5.0 h1:N+/8c5rE6EqugZwHii4IFsaJ7MUhoWX07J5tC/iI5Ds=
@@ -1402,20 +1120,13 @@ modernc.org/sqlite v1.20.3/go.mod h1:zKcGyrICaxNTMEHSr1HQ2GUraP0j+845GYw37+EyT6A
modernc.org/strutil v1.1.3 h1:fNMm+oJklMGYfU9Ylcywl0CO5O6nTfaowNsh2wpPjzY=
modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw=
modernc.org/tcl v1.15.0 h1:oY+JeD11qVVSgVvodMJsu7Edf8tr5E/7tuhF5cNYz34=
+modernc.org/tcl v1.15.0/go.mod h1:xRoGotBZ6dU+Zo2tca+2EqVEeMmOUBzHnhIwq4YrVnE=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
modernc.org/z v1.7.0 h1:xkDw/KepgEjeizO2sNco+hqYkU12taxQFqPEmgm1GWE=
+modernc.org/z v1.7.0/go.mod h1:hVdgNMh8ggTuRG1rGU8x+xGRFfiQUIAw0ZqlPy8+HyQ=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
-sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
-sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
-sigs.k8s.io/structured-merge-diff/v3 v3.0.1/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
-sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
-sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
-sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=
diff --git a/go/cache/cache.go b/go/cache/cache.go
deleted file mode 100644
index b6466132452..00000000000
--- a/go/cache/cache.go
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cache
-
-// Cache is a generic interface type for a data structure that keeps recently used
-// objects in memory and evicts them when it becomes full.
-type Cache interface {
- Get(key string) (any, bool)
- Set(key string, val any) bool
- ForEach(callback func(any) bool)
-
- Delete(key string)
- Clear()
-
- // Wait waits for all pending operations on the cache to settle. Since cache writes
- // are asynchronous, a write may not be immediately accessible unless the user
- // manually calls Wait.
- Wait()
-
- Len() int
- Evictions() int64
- Hits() int64
- Misses() int64
- UsedCapacity() int64
- MaxCapacity() int64
- SetCapacity(int64)
-}
-
-type cachedObject interface {
- CachedSize(alloc bool) int64
-}
-
-// NewDefaultCacheImpl returns the default cache implementation for Vitess. The options in the
-// Config struct control the memory and entry limits for the cache, and the underlying cache
-// implementation.
-func NewDefaultCacheImpl(cfg *Config) Cache {
- switch {
- case cfg == nil:
- return &nullCache{}
-
- case cfg.LFU:
- if cfg.MaxEntries == 0 || cfg.MaxMemoryUsage == 0 {
- return &nullCache{}
- }
- return NewRistrettoCache(cfg.MaxEntries, cfg.MaxMemoryUsage, func(val any) int64 {
- return val.(cachedObject).CachedSize(true)
- })
-
- default:
- if cfg.MaxEntries == 0 {
- return &nullCache{}
- }
- return NewLRUCache(cfg.MaxEntries, func(_ any) int64 {
- return 1
- })
- }
-}
-
-// Config is the configuration options for a cache instance
-type Config struct {
- // MaxEntries is the estimated amount of entries that the cache will hold at capacity
- MaxEntries int64
- // MaxMemoryUsage is the maximum amount of memory the cache can handle
- MaxMemoryUsage int64
- // LFU toggles whether to use a new cache implementation with a TinyLFU admission policy
- LFU bool
-}
-
-// DefaultConfig is the default configuration for a cache instance in Vitess
-var DefaultConfig = &Config{
- MaxEntries: 5000,
- MaxMemoryUsage: 32 * 1024 * 1024,
- LFU: true,
-}
diff --git a/go/cache/cache_test.go b/go/cache/cache_test.go
deleted file mode 100644
index 911a3bb207b..00000000000
--- a/go/cache/cache_test.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package cache
-
-import (
- "fmt"
- "testing"
-
- "github.com/stretchr/testify/require"
-
- "vitess.io/vitess/go/cache/ristretto"
-)
-
-func TestNewDefaultCacheImpl(t *testing.T) {
- assertNullCache := func(t *testing.T, cache Cache) {
- _, ok := cache.(*nullCache)
- require.True(t, ok)
- }
-
- assertLFUCache := func(t *testing.T, cache Cache) {
- _, ok := cache.(*ristretto.Cache)
- require.True(t, ok)
- }
-
- assertLRUCache := func(t *testing.T, cache Cache) {
- _, ok := cache.(*LRUCache)
- require.True(t, ok)
- }
-
- tests := []struct {
- cfg *Config
- verify func(t *testing.T, cache Cache)
- }{
- {&Config{MaxEntries: 0, MaxMemoryUsage: 0, LFU: false}, assertNullCache},
- {&Config{MaxEntries: 0, MaxMemoryUsage: 0, LFU: true}, assertNullCache},
- {&Config{MaxEntries: 100, MaxMemoryUsage: 0, LFU: false}, assertLRUCache},
- {&Config{MaxEntries: 0, MaxMemoryUsage: 1000, LFU: false}, assertNullCache},
- {&Config{MaxEntries: 100, MaxMemoryUsage: 1000, LFU: false}, assertLRUCache},
- {&Config{MaxEntries: 100, MaxMemoryUsage: 0, LFU: true}, assertNullCache},
- {&Config{MaxEntries: 100, MaxMemoryUsage: 1000, LFU: true}, assertLFUCache},
- {&Config{MaxEntries: 0, MaxMemoryUsage: 1000, LFU: true}, assertNullCache},
- }
- for _, tt := range tests {
- t.Run(fmt.Sprintf("%d.%d.%v", tt.cfg.MaxEntries, tt.cfg.MaxMemoryUsage, tt.cfg.LFU), func(t *testing.T) {
- cache := NewDefaultCacheImpl(tt.cfg)
- tt.verify(t, cache)
- })
- }
-}
diff --git a/go/cache/lru_cache.go b/go/cache/lru_cache.go
index 31ceadaf201..d845265b77b 100644
--- a/go/cache/lru_cache.go
+++ b/go/cache/lru_cache.go
@@ -29,8 +29,6 @@ import (
"time"
)
-var _ Cache = &LRUCache{}
-
// LRUCache is a typical LRU cache implementation. If the cache
// reaches the capacity, the least recently used item is deleted from
// the cache. Note the capacity is not the number of items, but the
@@ -250,3 +248,7 @@ func (lru *LRUCache) checkCapacity() {
lru.evictions++
}
}
+
+func (lru *LRUCache) Close() {
+ lru.Clear()
+}
diff --git a/go/cache/null.go b/go/cache/null.go
deleted file mode 100644
index c99d52eb2ec..00000000000
--- a/go/cache/null.go
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package cache
-
-// nullCache is a no-op cache that does not store items
-type nullCache struct{}
-
-// Get never returns anything on the nullCache
-func (n *nullCache) Get(_ string) (any, bool) {
- return nil, false
-}
-
-// Set is a no-op in the nullCache
-func (n *nullCache) Set(_ string, _ any) bool {
- return false
-}
-
-// ForEach iterates the nullCache, which is always empty
-func (n *nullCache) ForEach(_ func(any) bool) {}
-
-// Delete is a no-op in the nullCache
-func (n *nullCache) Delete(_ string) {}
-
-// Clear is a no-op in the nullCache
-func (n *nullCache) Clear() {}
-
-// Wait is a no-op in the nullcache
-func (n *nullCache) Wait() {}
-
-func (n *nullCache) Len() int {
- return 0
-}
-
-// Hits returns number of cache hits since creation
-func (n *nullCache) Hits() int64 {
- return 0
-}
-
-// Hits returns number of cache misses since creation
-func (n *nullCache) Misses() int64 {
- return 0
-}
-
-// Capacity returns the capacity of the nullCache, which is always 0
-func (n *nullCache) UsedCapacity() int64 {
- return 0
-}
-
-// Capacity returns the capacity of the nullCache, which is always 0
-func (n *nullCache) MaxCapacity() int64 {
- return 0
-}
-
-// SetCapacity sets the capacity of the null cache, which is a no-op
-func (n *nullCache) SetCapacity(_ int64) {}
-
-func (n *nullCache) Evictions() int64 {
- return 0
-}
diff --git a/go/cache/ristretto.go b/go/cache/ristretto.go
deleted file mode 100644
index 6d6f596a5b9..00000000000
--- a/go/cache/ristretto.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package cache
-
-import (
- "vitess.io/vitess/go/cache/ristretto"
-)
-
-var _ Cache = &ristretto.Cache{}
-
-// NewRistrettoCache returns a Cache implementation based on Ristretto
-func NewRistrettoCache(maxEntries, maxCost int64, cost func(any) int64) *ristretto.Cache {
- // The TinyLFU paper recommends to allocate 10x times the max entries amount as counters
- // for the admission policy; since our caches are small and we're very interested on admission
- // accuracy, we're a bit more greedy than 10x
- const CounterRatio = 12
-
- config := ristretto.Config{
- NumCounters: maxEntries * CounterRatio,
- MaxCost: maxCost,
- BufferItems: 64,
- Metrics: true,
- Cost: cost,
- }
- cache, err := ristretto.NewCache(&config)
- if err != nil {
- panic(err)
- }
- return cache
-}
diff --git a/go/cache/ristretto/bloom/bbloom.go b/go/cache/ristretto/bloom/bbloom.go
deleted file mode 100644
index ce5daa6864d..00000000000
--- a/go/cache/ristretto/bloom/bbloom.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// The MIT License (MIT)
-// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
-
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-// the Software, and to permit persons to whom the Software is furnished to do so,
-// subject to the following conditions:
-
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-package bloom
-
-import (
- "math"
- "unsafe"
-)
-
-// helper
-var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128}
-
-func getSize(ui64 uint64) (size uint64, exponent uint64) {
- if ui64 < uint64(512) {
- ui64 = uint64(512)
- }
- size = uint64(1)
- for size < ui64 {
- size <<= 1
- exponent++
- }
- return size, exponent
-}
-
-// NewBloomFilterWithErrorRate returns a new bloomfilter with optimal size for the given
-// error rate
-func NewBloomFilterWithErrorRate(numEntries uint64, wrongs float64) *Bloom {
- size := -1 * float64(numEntries) * math.Log(wrongs) / math.Pow(0.69314718056, 2)
- locs := math.Ceil(0.69314718056 * size / float64(numEntries))
- return NewBloomFilter(uint64(size), uint64(locs))
-}
-
-// NewBloomFilter returns a new bloomfilter.
-func NewBloomFilter(entries, locs uint64) (bloomfilter *Bloom) {
- size, exponent := getSize(entries)
- bloomfilter = &Bloom{
- sizeExp: exponent,
- size: size - 1,
- setLocs: locs,
- shift: 64 - exponent,
- }
- bloomfilter.Size(size)
- return bloomfilter
-}
-
-// Bloom filter
-type Bloom struct {
- bitset []uint64
- ElemNum uint64
- sizeExp uint64
- size uint64
- setLocs uint64
- shift uint64
-}
-
-// <--- http://www.cse.yorku.ca/~oz/hash.html
-// modified Berkeley DB Hash (32bit)
-// hash is casted to l, h = 16bit fragments
-// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) {
-// hash := uint64(len(*b))
-// for _, c := range *b {
-// hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash
-// }
-// h = hash >> bl.shift
-// l = hash << bl.shift >> bl.shift
-// return l, h
-// }
-
-// Add adds hash of a key to the bloomfilter.
-func (bl *Bloom) Add(hash uint64) {
- h := hash >> bl.shift
- l := hash << bl.shift >> bl.shift
- for i := uint64(0); i < bl.setLocs; i++ {
- bl.Set((h + i*l) & bl.size)
- bl.ElemNum++
- }
-}
-
-// Has checks if bit(s) for entry hash is/are set,
-// returns true if the hash was added to the Bloom Filter.
-func (bl Bloom) Has(hash uint64) bool {
- h := hash >> bl.shift
- l := hash << bl.shift >> bl.shift
- for i := uint64(0); i < bl.setLocs; i++ {
- if !bl.IsSet((h + i*l) & bl.size) {
- return false
- }
- }
- return true
-}
-
-// AddIfNotHas only Adds hash, if it's not present in the bloomfilter.
-// Returns true if hash was added.
-// Returns false if hash was already registered in the bloomfilter.
-func (bl *Bloom) AddIfNotHas(hash uint64) bool {
- if bl.Has(hash) {
- return false
- }
- bl.Add(hash)
- return true
-}
-
-// TotalSize returns the total size of the bloom filter.
-func (bl *Bloom) TotalSize() int {
- // The bl struct has 5 members and each one is 8 byte. The bitset is a
- // uint64 byte slice.
- return len(bl.bitset)*8 + 5*8
-}
-
-// Size makes Bloom filter with as bitset of size sz.
-func (bl *Bloom) Size(sz uint64) {
- bl.bitset = make([]uint64, sz>>6)
-}
-
-// Clear resets the Bloom filter.
-func (bl *Bloom) Clear() {
- for i := range bl.bitset {
- bl.bitset[i] = 0
- }
-}
-
-// Set sets the bit[idx] of bitset.
-func (bl *Bloom) Set(idx uint64) {
- ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))
- *(*uint8)(ptr) |= mask[idx%8]
-}
-
-// IsSet checks if bit[idx] of bitset is set, returns true/false.
-func (bl *Bloom) IsSet(idx uint64) bool {
- ptr := unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))
- r := ((*(*uint8)(ptr)) >> (idx % 8)) & 1
- return r == 1
-}
diff --git a/go/cache/ristretto/bloom/bbloom_test.go b/go/cache/ristretto/bloom/bbloom_test.go
deleted file mode 100644
index 7d280988bae..00000000000
--- a/go/cache/ristretto/bloom/bbloom_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package bloom
-
-import (
- "crypto/rand"
- "os"
- "testing"
-
- _flag "vitess.io/vitess/go/internal/flag"
- "vitess.io/vitess/go/vt/log"
-
- "vitess.io/vitess/go/hack"
-)
-
-var (
- wordlist1 [][]byte
- n = uint64(1 << 16)
- bf *Bloom
-)
-
-func TestMain(m *testing.M) {
- // hack to get rid of an "ERROR: logging before flag.Parse"
- _flag.TrickGlog()
- wordlist1 = make([][]byte, n)
- for i := range wordlist1 {
- b := make([]byte, 32)
- _, _ = rand.Read(b)
- wordlist1[i] = b
- }
- log.Info("Benchmarks relate to 2**16 OP. --> output/65536 op/ns")
-
- os.Exit(m.Run())
-}
-
-func TestM_NumberOfWrongs(t *testing.T) {
- bf = NewBloomFilter(n*10, 7)
-
- cnt := 0
- for i := range wordlist1 {
- hash := hack.RuntimeMemhash(wordlist1[i], 0)
- if !bf.AddIfNotHas(hash) {
- cnt++
- }
- }
- log.Infof("Bloomfilter New(7* 2**16, 7) (-> size=%v bit): \n Check for 'false positives': %v wrong positive 'Has' results on 2**16 entries => %v %%", len(bf.bitset)<<6, cnt, float64(cnt)/float64(n))
-
-}
-
-func BenchmarkM_New(b *testing.B) {
- for r := 0; r < b.N; r++ {
- _ = NewBloomFilter(n*10, 7)
- }
-}
-
-func BenchmarkM_Clear(b *testing.B) {
- bf = NewBloomFilter(n*10, 7)
- for i := range wordlist1 {
- hash := hack.RuntimeMemhash(wordlist1[i], 0)
- bf.Add(hash)
- }
- b.ResetTimer()
- for r := 0; r < b.N; r++ {
- bf.Clear()
- }
-}
-
-func BenchmarkM_Add(b *testing.B) {
- bf = NewBloomFilter(n*10, 7)
- b.ResetTimer()
- for r := 0; r < b.N; r++ {
- for i := range wordlist1 {
- hash := hack.RuntimeMemhash(wordlist1[i], 0)
- bf.Add(hash)
- }
- }
-
-}
-
-func BenchmarkM_Has(b *testing.B) {
- b.ResetTimer()
- for r := 0; r < b.N; r++ {
- for i := range wordlist1 {
- hash := hack.RuntimeMemhash(wordlist1[i], 0)
- bf.Has(hash)
- }
- }
-}
diff --git a/go/cache/ristretto/cache.go b/go/cache/ristretto/cache.go
deleted file mode 100644
index b745d6dc991..00000000000
--- a/go/cache/ristretto/cache.go
+++ /dev/null
@@ -1,697 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- * Copyright 2021 The Vitess Authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Package ristretto is a fast, fixed size, in-memory cache with a dual focus on
-// throughput and hit ratio performance. You can easily add Ristretto to an
-// existing system and keep the most valuable data where you need it.
-package ristretto
-
-import (
- "bytes"
- "errors"
- "fmt"
- "sync"
- "sync/atomic"
- "time"
- "unsafe"
-
- "vitess.io/vitess/go/hack"
-)
-
-var (
- // TODO: find the optimal value for this or make it configurable
- setBufSize = 32 * 1024
-)
-
-func defaultStringHash(key string) (uint64, uint64) {
- const Seed1 = uint64(0x1122334455667788)
- const Seed2 = uint64(0x8877665544332211)
- return hack.RuntimeStrhash(key, Seed1), hack.RuntimeStrhash(key, Seed2)
-}
-
-type itemCallback func(*Item)
-
-// CacheItemSize is the overhead in bytes for every stored cache item
-var CacheItemSize = hack.RuntimeAllocSize(int64(unsafe.Sizeof(storeItem{})))
-
-// Cache is a thread-safe implementation of a hashmap with a TinyLFU admission
-// policy and a Sampled LFU eviction policy. You can use the same Cache instance
-// from as many goroutines as you want.
-type Cache struct {
- // store is the central concurrent hashmap where key-value items are stored.
- store store
- // policy determines what gets let in to the cache and what gets kicked out.
- policy policy
- // getBuf is a custom ring buffer implementation that gets pushed to when
- // keys are read.
- getBuf *ringBuffer
- // setBuf is a buffer allowing us to batch/drop Sets during times of high
- // contention.
- setBuf chan *Item
- // onEvict is called for item evictions.
- onEvict itemCallback
- // onReject is called when an item is rejected via admission policy.
- onReject itemCallback
- // onExit is called whenever a value goes out of scope from the cache.
- onExit func(any)
- // KeyToHash function is used to customize the key hashing algorithm.
- // Each key will be hashed using the provided function. If keyToHash value
- // is not set, the default keyToHash function is used.
- keyToHash func(string) (uint64, uint64)
- // stop is used to stop the processItems goroutine.
- stop chan struct{}
- // indicates whether cache is closed.
- isClosed bool
- // cost calculates cost from a value.
- cost func(value any) int64
- // ignoreInternalCost dictates whether to ignore the cost of internally storing
- // the item in the cost calculation.
- ignoreInternalCost bool
- // Metrics contains a running log of important statistics like hits, misses,
- // and dropped items.
- Metrics *Metrics
-}
-
-// Config is passed to NewCache for creating new Cache instances.
-type Config struct {
- // NumCounters determines the number of counters (keys) to keep that hold
- // access frequency information. It's generally a good idea to have more
- // counters than the max cache capacity, as this will improve eviction
- // accuracy and subsequent hit ratios.
- //
- // For example, if you expect your cache to hold 1,000,000 items when full,
- // NumCounters should be 10,000,000 (10x). Each counter takes up 4 bits, so
- // keeping 10,000,000 counters would require 5MB of memory.
- NumCounters int64
- // MaxCost can be considered as the cache capacity, in whatever units you
- // choose to use.
- //
- // For example, if you want the cache to have a max capacity of 100MB, you
- // would set MaxCost to 100,000,000 and pass an item's number of bytes as
- // the `cost` parameter for calls to Set. If new items are accepted, the
- // eviction process will take care of making room for the new item and not
- // overflowing the MaxCost value.
- MaxCost int64
- // BufferItems determines the size of Get buffers.
- //
- // Unless you have a rare use case, using `64` as the BufferItems value
- // results in good performance.
- BufferItems int64
- // Metrics determines whether cache statistics are kept during the cache's
- // lifetime. There *is* some overhead to keeping statistics, so you should
- // only set this flag to true when testing or throughput performance isn't a
- // major factor.
- Metrics bool
- // OnEvict is called for every eviction and passes the hashed key, value,
- // and cost to the function.
- OnEvict func(item *Item)
- // OnReject is called for every rejection done via the policy.
- OnReject func(item *Item)
- // OnExit is called whenever a value is removed from cache. This can be
- // used to do manual memory deallocation. Would also be called on eviction
- // and rejection of the value.
- OnExit func(val any)
- // KeyToHash function is used to customize the key hashing algorithm.
- // Each key will be hashed using the provided function. If keyToHash value
- // is not set, the default keyToHash function is used.
- KeyToHash func(string) (uint64, uint64)
- // Cost evaluates a value and outputs a corresponding cost. This function
- // is ran after Set is called for a new item or an item update with a cost
- // param of 0.
- Cost func(value any) int64
- // IgnoreInternalCost set to true indicates to the cache that the cost of
- // internally storing the value should be ignored. This is useful when the
- // cost passed to set is not using bytes as units. Keep in mind that setting
- // this to true will increase the memory usage.
- IgnoreInternalCost bool
-}
-
-type itemFlag byte
-
-const (
- itemNew itemFlag = iota
- itemDelete
- itemUpdate
-)
-
-// Item is passed to setBuf so items can eventually be added to the cache.
-type Item struct {
- flag itemFlag
- Key uint64
- Conflict uint64
- Value any
- Cost int64
- wg *sync.WaitGroup
-}
-
-// NewCache returns a new Cache instance and any configuration errors, if any.
-func NewCache(config *Config) (*Cache, error) {
- switch {
- case config.NumCounters == 0:
- return nil, errors.New("NumCounters can't be zero")
- case config.MaxCost == 0:
- return nil, errors.New("Capacity can't be zero")
- case config.BufferItems == 0:
- return nil, errors.New("BufferItems can't be zero")
- }
- policy := newPolicy(config.NumCounters, config.MaxCost)
- cache := &Cache{
- store: newStore(),
- policy: policy,
- getBuf: newRingBuffer(policy, config.BufferItems),
- setBuf: make(chan *Item, setBufSize),
- keyToHash: config.KeyToHash,
- stop: make(chan struct{}),
- cost: config.Cost,
- ignoreInternalCost: config.IgnoreInternalCost,
- }
- cache.onExit = func(val any) {
- if config.OnExit != nil && val != nil {
- config.OnExit(val)
- }
- }
- cache.onEvict = func(item *Item) {
- if config.OnEvict != nil {
- config.OnEvict(item)
- }
- cache.onExit(item.Value)
- }
- cache.onReject = func(item *Item) {
- if config.OnReject != nil {
- config.OnReject(item)
- }
- cache.onExit(item.Value)
- }
- if cache.keyToHash == nil {
- cache.keyToHash = defaultStringHash
- }
- if config.Metrics {
- cache.collectMetrics()
- }
- // NOTE: benchmarks seem to show that performance decreases the more
- // goroutines we have running cache.processItems(), so 1 should
- // usually be sufficient
- go cache.processItems()
- return cache, nil
-}
-
-// Wait blocks until all the current cache operations have been processed in the background
-func (c *Cache) Wait() {
- if c == nil || c.isClosed {
- return
- }
- wg := &sync.WaitGroup{}
- wg.Add(1)
- c.setBuf <- &Item{wg: wg}
- wg.Wait()
-}
-
-// Get returns the value (if any) and a boolean representing whether the
-// value was found or not. The value can be nil and the boolean can be true at
-// the same time.
-func (c *Cache) Get(key string) (any, bool) {
- if c == nil || c.isClosed {
- return nil, false
- }
- keyHash, conflictHash := c.keyToHash(key)
- c.getBuf.Push(keyHash)
- value, ok := c.store.Get(keyHash, conflictHash)
- if ok {
- c.Metrics.add(hit, keyHash, 1)
- } else {
- c.Metrics.add(miss, keyHash, 1)
- }
- return value, ok
-}
-
-// Set attempts to add the key-value item to the cache. If it returns false,
-// then the Set was dropped and the key-value item isn't added to the cache. If
-// it returns true, there's still a chance it could be dropped by the policy if
-// its determined that the key-value item isn't worth keeping, but otherwise the
-// item will be added and other items will be evicted in order to make room.
-//
-// The cost of the entry will be evaluated lazily by the cache's Cost function.
-func (c *Cache) Set(key string, value any) bool {
- return c.SetWithCost(key, value, 0)
-}
-
-// SetWithCost works like Set but adds a key-value pair to the cache with a specific
-// cost. The built-in Cost function will not be called to evaluate the object's cost
-// and instead the given value will be used.
-func (c *Cache) SetWithCost(key string, value any, cost int64) bool {
- if c == nil || c.isClosed {
- return false
- }
-
- keyHash, conflictHash := c.keyToHash(key)
- i := &Item{
- flag: itemNew,
- Key: keyHash,
- Conflict: conflictHash,
- Value: value,
- Cost: cost,
- }
- // cost is eventually updated. The expiration must also be immediately updated
- // to prevent items from being prematurely removed from the map.
- if prev, ok := c.store.Update(i); ok {
- c.onExit(prev)
- i.flag = itemUpdate
- }
- // Attempt to send item to policy.
- select {
- case c.setBuf <- i:
- return true
- default:
- if i.flag == itemUpdate {
- // Return true if this was an update operation since we've already
- // updated the store. For all the other operations (set/delete), we
- // return false which means the item was not inserted.
- return true
- }
- c.Metrics.add(dropSets, keyHash, 1)
- return false
- }
-}
-
-// Delete deletes the key-value item from the cache if it exists.
-func (c *Cache) Delete(key string) {
- if c == nil || c.isClosed {
- return
- }
- keyHash, conflictHash := c.keyToHash(key)
- // Delete immediately.
- _, prev := c.store.Del(keyHash, conflictHash)
- c.onExit(prev)
- // If we've set an item, it would be applied slightly later.
- // So we must push the same item to `setBuf` with the deletion flag.
- // This ensures that if a set is followed by a delete, it will be
- // applied in the correct order.
- c.setBuf <- &Item{
- flag: itemDelete,
- Key: keyHash,
- Conflict: conflictHash,
- }
-}
-
-// Close stops all goroutines and closes all channels.
-func (c *Cache) Close() {
- if c == nil || c.isClosed {
- return
- }
- c.Clear()
-
- // Block until processItems goroutine is returned.
- c.stop <- struct{}{}
- close(c.stop)
- close(c.setBuf)
- c.policy.Close()
- c.isClosed = true
-}
-
-// Clear empties the hashmap and zeroes all policy counters. Note that this is
-// not an atomic operation (but that shouldn't be a problem as it's assumed that
-// Set/Get calls won't be occurring until after this).
-func (c *Cache) Clear() {
- if c == nil || c.isClosed {
- return
- }
- // Block until processItems goroutine is returned.
- c.stop <- struct{}{}
-
- // Clear out the setBuf channel.
-loop:
- for {
- select {
- case i := <-c.setBuf:
- if i.wg != nil {
- i.wg.Done()
- continue
- }
- if i.flag != itemUpdate {
- // In itemUpdate, the value is already set in the store. So, no need to call
- // onEvict here.
- c.onEvict(i)
- }
- default:
- break loop
- }
- }
-
- // Clear value hashmap and policy data.
- c.policy.Clear()
- c.store.Clear(c.onEvict)
- // Only reset metrics if they're enabled.
- if c.Metrics != nil {
- c.Metrics.Clear()
- }
- // Restart processItems goroutine.
- go c.processItems()
-}
-
-// Len returns the size of the cache (in entries)
-func (c *Cache) Len() int {
- if c == nil {
- return 0
- }
- return c.store.Len()
-}
-
-// UsedCapacity returns the size of the cache (in bytes)
-func (c *Cache) UsedCapacity() int64 {
- if c == nil {
- return 0
- }
- return c.policy.Used()
-}
-
-// MaxCapacity returns the max cost of the cache (in bytes)
-func (c *Cache) MaxCapacity() int64 {
- if c == nil {
- return 0
- }
- return c.policy.MaxCost()
-}
-
-// SetCapacity updates the maxCost of an existing cache.
-func (c *Cache) SetCapacity(maxCost int64) {
- if c == nil {
- return
- }
- c.policy.UpdateMaxCost(maxCost)
-}
-
-// Evictions returns the number of evictions
-func (c *Cache) Evictions() int64 {
- // TODO
- if c == nil || c.Metrics == nil {
- return 0
- }
- return int64(c.Metrics.KeysEvicted())
-}
-
-// Hits returns the number of cache hits
-func (c *Cache) Hits() int64 {
- if c == nil || c.Metrics == nil {
- return 0
- }
- return int64(c.Metrics.Hits())
-}
-
-// Misses returns the number of cache misses
-func (c *Cache) Misses() int64 {
- if c == nil || c.Metrics == nil {
- return 0
- }
- return int64(c.Metrics.Misses())
-}
-
-// ForEach yields all the values currently stored in the cache to the given callback.
-// The callback may return `false` to stop the iteration early.
-func (c *Cache) ForEach(forEach func(any) bool) {
- if c == nil {
- return
- }
- c.store.ForEach(forEach)
-}
-
-// processItems is ran by goroutines processing the Set buffer.
-func (c *Cache) processItems() {
- startTs := make(map[uint64]time.Time)
- numToKeep := 100000 // TODO: Make this configurable via options.
-
- trackAdmission := func(key uint64) {
- if c.Metrics == nil {
- return
- }
- startTs[key] = time.Now()
- if len(startTs) > numToKeep {
- for k := range startTs {
- if len(startTs) <= numToKeep {
- break
- }
- delete(startTs, k)
- }
- }
- }
- onEvict := func(i *Item) {
- delete(startTs, i.Key)
- if c.onEvict != nil {
- c.onEvict(i)
- }
- }
-
- for {
- select {
- case i := <-c.setBuf:
- if i.wg != nil {
- i.wg.Done()
- continue
- }
- // Calculate item cost value if new or update.
- if i.Cost == 0 && c.cost != nil && i.flag != itemDelete {
- i.Cost = c.cost(i.Value)
- }
- if !c.ignoreInternalCost {
- // Add the cost of internally storing the object.
- i.Cost += CacheItemSize
- }
-
- switch i.flag {
- case itemNew:
- victims, added := c.policy.Add(i.Key, i.Cost)
- if added {
- c.store.Set(i)
- c.Metrics.add(keyAdd, i.Key, 1)
- trackAdmission(i.Key)
- } else {
- c.onReject(i)
- }
- for _, victim := range victims {
- victim.Conflict, victim.Value = c.store.Del(victim.Key, 0)
- onEvict(victim)
- }
-
- case itemUpdate:
- c.policy.Update(i.Key, i.Cost)
-
- case itemDelete:
- c.policy.Del(i.Key) // Deals with metrics updates.
- _, val := c.store.Del(i.Key, i.Conflict)
- c.onExit(val)
- }
- case <-c.stop:
- return
- }
- }
-}
-
-// collectMetrics just creates a new *Metrics instance and adds the pointers
-// to the cache and policy instances.
-func (c *Cache) collectMetrics() {
- c.Metrics = newMetrics()
- c.policy.CollectMetrics(c.Metrics)
-}
-
-type metricType int
-
-const (
- // The following 2 keep track of hits and misses.
- hit = iota
- miss
- // The following 3 keep track of number of keys added, updated and evicted.
- keyAdd
- keyUpdate
- keyEvict
- // The following 2 keep track of cost of keys added and evicted.
- costAdd
- costEvict
- // The following keep track of how many sets were dropped or rejected later.
- dropSets
- rejectSets
- // The following 2 keep track of how many gets were kept and dropped on the
- // floor.
- dropGets
- keepGets
- // This should be the final enum. Other enums should be set before this.
- doNotUse
-)
-
-func stringFor(t metricType) string {
- switch t {
- case hit:
- return "hit"
- case miss:
- return "miss"
- case keyAdd:
- return "keys-added"
- case keyUpdate:
- return "keys-updated"
- case keyEvict:
- return "keys-evicted"
- case costAdd:
- return "cost-added"
- case costEvict:
- return "cost-evicted"
- case dropSets:
- return "sets-dropped"
- case rejectSets:
- return "sets-rejected" // by policy.
- case dropGets:
- return "gets-dropped"
- case keepGets:
- return "gets-kept"
- default:
- return "unidentified"
- }
-}
-
-// Metrics is a snapshot of performance statistics for the lifetime of a cache instance.
-type Metrics struct {
- all [doNotUse][]*uint64
-}
-
-func newMetrics() *Metrics {
- s := &Metrics{}
- for i := 0; i < doNotUse; i++ {
- s.all[i] = make([]*uint64, 256)
- slice := s.all[i]
- for j := range slice {
- slice[j] = new(uint64)
- }
- }
- return s
-}
-
-func (p *Metrics) add(t metricType, hash, delta uint64) {
- if p == nil {
- return
- }
- valp := p.all[t]
- // Avoid false sharing by padding at least 64 bytes of space between two
- // atomic counters which would be incremented.
- idx := (hash % 25) * 10
- atomic.AddUint64(valp[idx], delta)
-}
-
-func (p *Metrics) get(t metricType) uint64 {
- if p == nil {
- return 0
- }
- valp := p.all[t]
- var total uint64
- for i := range valp {
- total += atomic.LoadUint64(valp[i])
- }
- return total
-}
-
-// Hits is the number of Get calls where a value was found for the corresponding key.
-func (p *Metrics) Hits() uint64 {
- return p.get(hit)
-}
-
-// Misses is the number of Get calls where a value was not found for the corresponding key.
-func (p *Metrics) Misses() uint64 {
- return p.get(miss)
-}
-
-// KeysAdded is the total number of Set calls where a new key-value item was added.
-func (p *Metrics) KeysAdded() uint64 {
- return p.get(keyAdd)
-}
-
-// KeysUpdated is the total number of Set calls where the value was updated.
-func (p *Metrics) KeysUpdated() uint64 {
- return p.get(keyUpdate)
-}
-
-// KeysEvicted is the total number of keys evicted.
-func (p *Metrics) KeysEvicted() uint64 {
- return p.get(keyEvict)
-}
-
-// CostAdded is the sum of costs that have been added (successful Set calls).
-func (p *Metrics) CostAdded() uint64 {
- return p.get(costAdd)
-}
-
-// CostEvicted is the sum of all costs that have been evicted.
-func (p *Metrics) CostEvicted() uint64 {
- return p.get(costEvict)
-}
-
-// SetsDropped is the number of Set calls that don't make it into internal
-// buffers (due to contention or some other reason).
-func (p *Metrics) SetsDropped() uint64 {
- return p.get(dropSets)
-}
-
-// SetsRejected is the number of Set calls rejected by the policy (TinyLFU).
-func (p *Metrics) SetsRejected() uint64 {
- return p.get(rejectSets)
-}
-
-// GetsDropped is the number of Get counter increments that are dropped
-// internally.
-func (p *Metrics) GetsDropped() uint64 {
- return p.get(dropGets)
-}
-
-// GetsKept is the number of Get counter increments that are kept.
-func (p *Metrics) GetsKept() uint64 {
- return p.get(keepGets)
-}
-
-// Ratio is the number of Hits over all accesses (Hits + Misses). This is the
-// percentage of successful Get calls.
-func (p *Metrics) Ratio() float64 {
- if p == nil {
- return 0.0
- }
- hits, misses := p.get(hit), p.get(miss)
- if hits == 0 && misses == 0 {
- return 0.0
- }
- return float64(hits) / float64(hits+misses)
-}
-
-// Clear resets all the metrics.
-func (p *Metrics) Clear() {
- if p == nil {
- return
- }
- for i := 0; i < doNotUse; i++ {
- for j := range p.all[i] {
- atomic.StoreUint64(p.all[i][j], 0)
- }
- }
-}
-
-// String returns a string representation of the metrics.
-func (p *Metrics) String() string {
- if p == nil {
- return ""
- }
- var buf bytes.Buffer
- for i := 0; i < doNotUse; i++ {
- t := metricType(i)
- fmt.Fprintf(&buf, "%s: %d ", stringFor(t), p.get(t))
- }
- fmt.Fprintf(&buf, "gets-total: %d ", p.get(hit)+p.get(miss))
- fmt.Fprintf(&buf, "hit-ratio: %.2f", p.Ratio())
- return buf.String()
-}
diff --git a/go/cache/ristretto/cache_test.go b/go/cache/ristretto/cache_test.go
deleted file mode 100644
index eda9f9109f3..00000000000
--- a/go/cache/ristretto/cache_test.go
+++ /dev/null
@@ -1,690 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- * Copyright 2021 The Vitess Authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package ristretto
-
-import (
- "fmt"
- "math/rand"
- "strconv"
- "strings"
- "sync"
- "testing"
- "time"
-
- "vitess.io/vitess/go/vt/log"
-
- "github.com/stretchr/testify/require"
-)
-
-var wait = time.Millisecond * 10
-
-func TestCacheKeyToHash(t *testing.T) {
- keyToHashCount := 0
- c, err := NewCache(&Config{
- NumCounters: 10,
- MaxCost: 1000,
- BufferItems: 64,
- IgnoreInternalCost: true,
- KeyToHash: func(key string) (uint64, uint64) {
- keyToHashCount++
- return defaultStringHash(key)
- },
- })
- require.NoError(t, err)
- if c.SetWithCost("1", 1, 1) {
- time.Sleep(wait)
- val, ok := c.Get("1")
- require.True(t, ok)
- require.NotNil(t, val)
- c.Delete("1")
- }
- require.Equal(t, 3, keyToHashCount)
-}
-
-func TestCacheMaxCost(t *testing.T) {
- charset := "abcdefghijklmnopqrstuvwxyz0123456789"
- key := func() string {
- k := make([]byte, 2)
- for i := range k {
- k[i] = charset[rand.Intn(len(charset))]
- }
- return string(k)
- }
- c, err := NewCache(&Config{
- NumCounters: 12960, // 36^2 * 10
- MaxCost: 1e6, // 1mb
- BufferItems: 64,
- Metrics: true,
- })
- require.NoError(t, err)
- stop := make(chan struct{}, 8)
- for i := 0; i < 8; i++ {
- go func() {
- for {
- select {
- case <-stop:
- return
- default:
- time.Sleep(time.Millisecond)
-
- k := key()
- if _, ok := c.Get(k); !ok {
- val := ""
- if rand.Intn(100) < 10 {
- val = "test"
- } else {
- val = strings.Repeat("a", 1000)
- }
- c.SetWithCost(key(), val, int64(2+len(val)))
- }
- }
- }
- }()
- }
- for i := 0; i < 20; i++ {
- time.Sleep(time.Second)
- cacheCost := c.Metrics.CostAdded() - c.Metrics.CostEvicted()
- log.Infof("total cache cost: %d", cacheCost)
- require.True(t, float64(cacheCost) <= float64(1e6*1.05))
- }
- for i := 0; i < 8; i++ {
- stop <- struct{}{}
- }
-}
-
-func TestUpdateMaxCost(t *testing.T) {
- c, err := NewCache(&Config{
- NumCounters: 10,
- MaxCost: 10,
- BufferItems: 64,
- })
- require.NoError(t, err)
- require.Equal(t, int64(10), c.MaxCapacity())
- require.True(t, c.SetWithCost("1", 1, 1))
- time.Sleep(wait)
- _, ok := c.Get("1")
- // Set is rejected because the cost of the entry is too high
- // when accounting for the internal cost of storing the entry.
- require.False(t, ok)
-
- // Update the max cost of the cache and retry.
- c.SetCapacity(1000)
- require.Equal(t, int64(1000), c.MaxCapacity())
- require.True(t, c.SetWithCost("1", 1, 1))
- time.Sleep(wait)
- val, ok := c.Get("1")
- require.True(t, ok)
- require.NotNil(t, val)
- c.Delete("1")
-}
-
-func TestNewCache(t *testing.T) {
- _, err := NewCache(&Config{
- NumCounters: 0,
- })
- require.Error(t, err)
-
- _, err = NewCache(&Config{
- NumCounters: 100,
- MaxCost: 0,
- })
- require.Error(t, err)
-
- _, err = NewCache(&Config{
- NumCounters: 100,
- MaxCost: 10,
- BufferItems: 0,
- })
- require.Error(t, err)
-
- c, err := NewCache(&Config{
- NumCounters: 100,
- MaxCost: 10,
- BufferItems: 64,
- Metrics: true,
- })
- require.NoError(t, err)
- require.NotNil(t, c)
-}
-
-func TestNilCache(t *testing.T) {
- var c *Cache
- val, ok := c.Get("1")
- require.False(t, ok)
- require.Nil(t, val)
-
- require.False(t, c.SetWithCost("1", 1, 1))
- c.Delete("1")
- c.Clear()
- c.Close()
-}
-
-func TestMultipleClose(t *testing.T) {
- var c *Cache
- c.Close()
-
- var err error
- c, err = NewCache(&Config{
- NumCounters: 100,
- MaxCost: 10,
- BufferItems: 64,
- Metrics: true,
- })
- require.NoError(t, err)
- c.Close()
- c.Close()
-}
-
-func TestSetAfterClose(t *testing.T) {
- c, err := newTestCache()
- require.NoError(t, err)
- require.NotNil(t, c)
-
- c.Close()
- require.False(t, c.SetWithCost("1", 1, 1))
-}
-
-func TestClearAfterClose(t *testing.T) {
- c, err := newTestCache()
- require.NoError(t, err)
- require.NotNil(t, c)
-
- c.Close()
- c.Clear()
-}
-
-func TestGetAfterClose(t *testing.T) {
- c, err := newTestCache()
- require.NoError(t, err)
- require.NotNil(t, c)
-
- require.True(t, c.SetWithCost("1", 1, 1))
- c.Close()
-
- _, ok := c.Get("2")
- require.False(t, ok)
-}
-
-func TestDelAfterClose(t *testing.T) {
- c, err := newTestCache()
- require.NoError(t, err)
- require.NotNil(t, c)
-
- require.True(t, c.SetWithCost("1", 1, 1))
- c.Close()
-
- c.Delete("1")
-}
-
-func TestCacheProcessItems(t *testing.T) {
- m := &sync.Mutex{}
- evicted := make(map[uint64]struct{})
- c, err := NewCache(&Config{
- NumCounters: 100,
- MaxCost: 10,
- BufferItems: 64,
- IgnoreInternalCost: true,
- Cost: func(value any) int64 {
- return int64(value.(int))
- },
- OnEvict: func(item *Item) {
- m.Lock()
- defer m.Unlock()
- evicted[item.Key] = struct{}{}
- },
- })
- require.NoError(t, err)
-
- var key uint64
- var conflict uint64
-
- key, conflict = defaultStringHash("1")
- c.setBuf <- &Item{
- flag: itemNew,
- Key: key,
- Conflict: conflict,
- Value: 1,
- Cost: 0,
- }
- time.Sleep(wait)
- require.True(t, c.policy.Has(key))
- require.Equal(t, int64(1), c.policy.Cost(key))
-
- key, conflict = defaultStringHash("1")
- c.setBuf <- &Item{
- flag: itemUpdate,
- Key: key,
- Conflict: conflict,
- Value: 2,
- Cost: 0,
- }
- time.Sleep(wait)
- require.Equal(t, int64(2), c.policy.Cost(key))
-
- key, conflict = defaultStringHash("1")
- c.setBuf <- &Item{
- flag: itemDelete,
- Key: key,
- Conflict: conflict,
- }
- time.Sleep(wait)
- key, conflict = defaultStringHash("1")
- val, ok := c.store.Get(key, conflict)
- require.False(t, ok)
- require.Nil(t, val)
- require.False(t, c.policy.Has(1))
-
- key, conflict = defaultStringHash("2")
- c.setBuf <- &Item{
- flag: itemNew,
- Key: key,
- Conflict: conflict,
- Value: 2,
- Cost: 3,
- }
- key, conflict = defaultStringHash("3")
- c.setBuf <- &Item{
- flag: itemNew,
- Key: key,
- Conflict: conflict,
- Value: 3,
- Cost: 3,
- }
- key, conflict = defaultStringHash("4")
- c.setBuf <- &Item{
- flag: itemNew,
- Key: key,
- Conflict: conflict,
- Value: 3,
- Cost: 3,
- }
- key, conflict = defaultStringHash("5")
- c.setBuf <- &Item{
- flag: itemNew,
- Key: key,
- Conflict: conflict,
- Value: 3,
- Cost: 5,
- }
- time.Sleep(wait)
- m.Lock()
- require.NotEqual(t, 0, len(evicted))
- m.Unlock()
-
- defer func() {
- require.NotNil(t, recover())
- }()
- c.Close()
- c.setBuf <- &Item{flag: itemNew}
-}
-
-func TestCacheGet(t *testing.T) {
- c, err := NewCache(&Config{
- NumCounters: 100,
- MaxCost: 10,
- BufferItems: 64,
- IgnoreInternalCost: true,
- Metrics: true,
- })
- require.NoError(t, err)
-
- key, conflict := defaultStringHash("1")
- i := Item{
- Key: key,
- Conflict: conflict,
- Value: 1,
- }
- c.store.Set(&i)
- val, ok := c.Get("1")
- require.True(t, ok)
- require.NotNil(t, val)
-
- val, ok = c.Get("2")
- require.False(t, ok)
- require.Nil(t, val)
-
- // 0.5 and not 1.0 because we tried Getting each item twice
- require.Equal(t, 0.5, c.Metrics.Ratio())
-
- c = nil
- val, ok = c.Get("0")
- require.False(t, ok)
- require.Nil(t, val)
-}
-
-// retrySet calls SetWithCost until the item is accepted by the cache.
-func retrySet(t *testing.T, c *Cache, key string, value int, cost int64) {
- for {
- if set := c.SetWithCost(key, value, cost); !set {
- time.Sleep(wait)
- continue
- }
-
- time.Sleep(wait)
- val, ok := c.Get(key)
- require.True(t, ok)
- require.NotNil(t, val)
- require.Equal(t, value, val.(int))
- return
- }
-}
-
-func TestCacheSet(t *testing.T) {
- c, err := NewCache(&Config{
- NumCounters: 100,
- MaxCost: 10,
- IgnoreInternalCost: true,
- BufferItems: 64,
- Metrics: true,
- })
- require.NoError(t, err)
-
- retrySet(t, c, "1", 1, 1)
-
- c.SetWithCost("1", 2, 2)
- val, ok := c.store.Get(defaultStringHash("1"))
- require.True(t, ok)
- require.Equal(t, 2, val.(int))
-
- c.stop <- struct{}{}
- for i := 0; i < setBufSize; i++ {
- key, conflict := defaultStringHash("1")
- c.setBuf <- &Item{
- flag: itemUpdate,
- Key: key,
- Conflict: conflict,
- Value: 1,
- Cost: 1,
- }
- }
- require.False(t, c.SetWithCost("2", 2, 1))
- require.Equal(t, uint64(1), c.Metrics.SetsDropped())
- close(c.setBuf)
- close(c.stop)
-
- c = nil
- require.False(t, c.SetWithCost("1", 1, 1))
-}
-
-func TestCacheInternalCost(t *testing.T) {
- c, err := NewCache(&Config{
- NumCounters: 100,
- MaxCost: 10,
- BufferItems: 64,
- Metrics: true,
- })
- require.NoError(t, err)
-
- // Get should return false because the cache's cost is too small to store the item
- // when accounting for the internal cost.
- c.SetWithCost("1", 1, 1)
- time.Sleep(wait)
- _, ok := c.Get("1")
- require.False(t, ok)
-}
-
-func TestCacheDel(t *testing.T) {
- c, err := NewCache(&Config{
- NumCounters: 100,
- MaxCost: 10,
- BufferItems: 64,
- })
- require.NoError(t, err)
-
- c.SetWithCost("1", 1, 1)
- c.Delete("1")
- // The deletes and sets are pushed through the setbuf. It might be possible
- // that the delete is not processed before the following get is called. So
- // wait for a millisecond for things to be processed.
- time.Sleep(time.Millisecond)
- val, ok := c.Get("1")
- require.False(t, ok)
- require.Nil(t, val)
-
- c = nil
- defer func() {
- require.Nil(t, recover())
- }()
- c.Delete("1")
-}
-
-func TestCacheClear(t *testing.T) {
- c, err := NewCache(&Config{
- NumCounters: 100,
- MaxCost: 10,
- IgnoreInternalCost: true,
- BufferItems: 64,
- Metrics: true,
- })
- require.NoError(t, err)
-
- for i := 0; i < 10; i++ {
- c.SetWithCost(strconv.Itoa(i), i, 1)
- }
- time.Sleep(wait)
- require.Equal(t, uint64(10), c.Metrics.KeysAdded())
-
- c.Clear()
- require.Equal(t, uint64(0), c.Metrics.KeysAdded())
-
- for i := 0; i < 10; i++ {
- val, ok := c.Get(strconv.Itoa(i))
- require.False(t, ok)
- require.Nil(t, val)
- }
-}
-
-func TestCacheMetrics(t *testing.T) {
- c, err := NewCache(&Config{
- NumCounters: 100,
- MaxCost: 10,
- IgnoreInternalCost: true,
- BufferItems: 64,
- Metrics: true,
- })
- require.NoError(t, err)
-
- for i := 0; i < 10; i++ {
- c.SetWithCost(strconv.Itoa(i), i, 1)
- }
- time.Sleep(wait)
- m := c.Metrics
- require.Equal(t, uint64(10), m.KeysAdded())
-}
-
-func TestMetrics(t *testing.T) {
- newMetrics()
-}
-
-func TestNilMetrics(t *testing.T) {
- var m *Metrics
- for _, f := range []func() uint64{
- m.Hits,
- m.Misses,
- m.KeysAdded,
- m.KeysEvicted,
- m.CostEvicted,
- m.SetsDropped,
- m.SetsRejected,
- m.GetsDropped,
- m.GetsKept,
- } {
- require.Equal(t, uint64(0), f())
- }
-}
-
-func TestMetricsAddGet(t *testing.T) {
- m := newMetrics()
- m.add(hit, 1, 1)
- m.add(hit, 2, 2)
- m.add(hit, 3, 3)
- require.Equal(t, uint64(6), m.Hits())
-
- m = nil
- m.add(hit, 1, 1)
- require.Equal(t, uint64(0), m.Hits())
-}
-
-func TestMetricsRatio(t *testing.T) {
- m := newMetrics()
- require.Equal(t, float64(0), m.Ratio())
-
- m.add(hit, 1, 1)
- m.add(hit, 2, 2)
- m.add(miss, 1, 1)
- m.add(miss, 2, 2)
- require.Equal(t, 0.5, m.Ratio())
-
- m = nil
- require.Equal(t, float64(0), m.Ratio())
-}
-
-func TestMetricsString(t *testing.T) {
- m := newMetrics()
- m.add(hit, 1, 1)
- m.add(miss, 1, 1)
- m.add(keyAdd, 1, 1)
- m.add(keyUpdate, 1, 1)
- m.add(keyEvict, 1, 1)
- m.add(costAdd, 1, 1)
- m.add(costEvict, 1, 1)
- m.add(dropSets, 1, 1)
- m.add(rejectSets, 1, 1)
- m.add(dropGets, 1, 1)
- m.add(keepGets, 1, 1)
- require.Equal(t, uint64(1), m.Hits())
- require.Equal(t, uint64(1), m.Misses())
- require.Equal(t, 0.5, m.Ratio())
- require.Equal(t, uint64(1), m.KeysAdded())
- require.Equal(t, uint64(1), m.KeysUpdated())
- require.Equal(t, uint64(1), m.KeysEvicted())
- require.Equal(t, uint64(1), m.CostAdded())
- require.Equal(t, uint64(1), m.CostEvicted())
- require.Equal(t, uint64(1), m.SetsDropped())
- require.Equal(t, uint64(1), m.SetsRejected())
- require.Equal(t, uint64(1), m.GetsDropped())
- require.Equal(t, uint64(1), m.GetsKept())
-
- require.NotEqual(t, 0, len(m.String()))
-
- m = nil
- require.Equal(t, 0, len(m.String()))
-
- require.Equal(t, "unidentified", stringFor(doNotUse))
-}
-
-func TestCacheMetricsClear(t *testing.T) {
- c, err := NewCache(&Config{
- NumCounters: 100,
- MaxCost: 10,
- BufferItems: 64,
- Metrics: true,
- })
- require.NoError(t, err)
-
- c.SetWithCost("1", 1, 1)
- stop := make(chan struct{})
- go func() {
- for {
- select {
- case <-stop:
- return
- default:
- c.Get("1")
- }
- }
- }()
- time.Sleep(wait)
- c.Clear()
- stop <- struct{}{}
- c.Metrics = nil
- c.Metrics.Clear()
-}
-
-// Regression test for bug https://github.com/dgraph-io/ristretto/issues/167
-func TestDropUpdates(t *testing.T) {
- originalSetBugSize := setBufSize
- defer func() { setBufSize = originalSetBugSize }()
-
- test := func() {
- // dropppedMap stores the items dropped from the cache.
- droppedMap := make(map[int]struct{})
- lastEvictedSet := int64(-1)
-
- var err error
- handler := func(_ any, value any) {
- v := value.(string)
- lastEvictedSet, err = strconv.ParseInt(string(v), 10, 32)
- require.NoError(t, err)
-
- _, ok := droppedMap[int(lastEvictedSet)]
- if ok {
- panic(fmt.Sprintf("val = %+v was dropped but it got evicted. Dropped items: %+v\n",
- lastEvictedSet, droppedMap))
- }
- }
-
- // This is important. The race condition shows up only when the setBuf
- // is full and that's why we reduce the buf size here. The test will
- // try to fill up the setbuf to it's capacity and then perform an
- // update on a key.
- setBufSize = 10
-
- c, err := NewCache(&Config{
- NumCounters: 100,
- MaxCost: 10,
- BufferItems: 64,
- Metrics: true,
- OnEvict: func(item *Item) {
- if item.Value != nil {
- handler(nil, item.Value)
- }
- },
- })
- require.NoError(t, err)
-
- for i := 0; i < 5*setBufSize; i++ {
- v := fmt.Sprintf("%0100d", i)
- // We're updating the same key.
- if !c.SetWithCost("0", v, 1) {
- // The race condition doesn't show up without this sleep.
- time.Sleep(time.Microsecond)
- droppedMap[i] = struct{}{}
- }
- }
- // Wait for all the items to be processed.
- c.Wait()
- // This will cause eviction from the cache.
- require.True(t, c.SetWithCost("1", nil, 10))
- c.Close()
- }
-
- // Run the test 100 times since it's not reliable.
- for i := 0; i < 100; i++ {
- test()
- }
-}
-
-func newTestCache() (*Cache, error) {
- return NewCache(&Config{
- NumCounters: 100,
- MaxCost: 10,
- BufferItems: 64,
- Metrics: true,
- })
-}
diff --git a/go/cache/ristretto/policy.go b/go/cache/ristretto/policy.go
deleted file mode 100644
index 84cc008cb99..00000000000
--- a/go/cache/ristretto/policy.go
+++ /dev/null
@@ -1,423 +0,0 @@
-/*
- * Copyright 2020 Dgraph Labs, Inc. and Contributors
- * Copyright 2021 The Vitess Authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package ristretto
-
-import (
- "math"
- "sync"
- "sync/atomic"
-
- "vitess.io/vitess/go/cache/ristretto/bloom"
-)
-
-const (
- // lfuSample is the number of items to sample when looking at eviction
- // candidates. 5 seems to be the most optimal number [citation needed].
- lfuSample = 5
-)
-
-// policy is the interface encapsulating eviction/admission behavior.
-//
-// TODO: remove this interface and just rename defaultPolicy to policy, as we
-//
-// are probably only going to use/implement/maintain one policy.
-type policy interface {
- ringConsumer
- // Add attempts to Add the key-cost pair to the Policy. It returns a slice
- // of evicted keys and a bool denoting whether or not the key-cost pair
- // was added. If it returns true, the key should be stored in cache.
- Add(uint64, int64) ([]*Item, bool)
- // Has returns true if the key exists in the Policy.
- Has(uint64) bool
- // Del deletes the key from the Policy.
- Del(uint64)
- // Cap returns the amount of used capacity.
- Used() int64
- // Close stops all goroutines and closes all channels.
- Close()
- // Update updates the cost value for the key.
- Update(uint64, int64)
- // Cost returns the cost value of a key or -1 if missing.
- Cost(uint64) int64
- // Optionally, set stats object to track how policy is performing.
- CollectMetrics(*Metrics)
- // Clear zeroes out all counters and clears hashmaps.
- Clear()
- // MaxCost returns the current max cost of the cache policy.
- MaxCost() int64
- // UpdateMaxCost updates the max cost of the cache policy.
- UpdateMaxCost(int64)
-}
-
-func newPolicy(numCounters, maxCost int64) policy {
- return newDefaultPolicy(numCounters, maxCost)
-}
-
-type defaultPolicy struct {
- sync.Mutex
- admit *tinyLFU
- evict *sampledLFU
- itemsCh chan []uint64
- stop chan struct{}
- isClosed bool
- metrics *Metrics
- numCounters int64
- maxCost int64
-}
-
-func newDefaultPolicy(numCounters, maxCost int64) *defaultPolicy {
- p := &defaultPolicy{
- admit: newTinyLFU(numCounters),
- evict: newSampledLFU(maxCost),
- itemsCh: make(chan []uint64, 3),
- stop: make(chan struct{}),
- numCounters: numCounters,
- maxCost: maxCost,
- }
- go p.processItems()
- return p
-}
-
-func (p *defaultPolicy) CollectMetrics(metrics *Metrics) {
- p.metrics = metrics
- p.evict.metrics = metrics
-}
-
-type policyPair struct {
- key uint64
- cost int64
-}
-
-func (p *defaultPolicy) processItems() {
- for {
- select {
- case items := <-p.itemsCh:
- p.Lock()
- p.admit.Push(items)
- p.Unlock()
- case <-p.stop:
- return
- }
- }
-}
-
-func (p *defaultPolicy) Push(keys []uint64) bool {
- if p.isClosed {
- return false
- }
-
- if len(keys) == 0 {
- return true
- }
-
- select {
- case p.itemsCh <- keys:
- p.metrics.add(keepGets, keys[0], uint64(len(keys)))
- return true
- default:
- p.metrics.add(dropGets, keys[0], uint64(len(keys)))
- return false
- }
-}
-
-// Add decides whether the item with the given key and cost should be accepted by
-// the policy. It returns the list of victims that have been evicted and a boolean
-// indicating whether the incoming item should be accepted.
-func (p *defaultPolicy) Add(key uint64, cost int64) ([]*Item, bool) {
- p.Lock()
- defer p.Unlock()
-
- // Cannot add an item bigger than entire cache.
- if cost > p.evict.getMaxCost() {
- return nil, false
- }
-
- // No need to go any further if the item is already in the cache.
- if has := p.evict.updateIfHas(key, cost); has {
- // An update does not count as an addition, so return false.
- return nil, false
- }
-
- // If the execution reaches this point, the key doesn't exist in the cache.
- // Calculate the remaining room in the cache (usually bytes).
- room := p.evict.roomLeft(cost)
- if room >= 0 {
- // There's enough room in the cache to store the new item without
- // overflowing. Do that now and stop here.
- p.evict.add(key, cost)
- p.metrics.add(costAdd, key, uint64(cost))
- return nil, true
- }
-
- // incHits is the hit count for the incoming item.
- incHits := p.admit.Estimate(key)
- // sample is the eviction candidate pool to be filled via random sampling.
- // TODO: perhaps we should use a min heap here. Right now our time
- // complexity is N for finding the min. Min heap should bring it down to
- // O(lg N).
- sample := make([]*policyPair, 0, lfuSample)
- // As items are evicted they will be appended to victims.
- victims := make([]*Item, 0)
-
- // Delete victims until there's enough space or a minKey is found that has
- // more hits than incoming item.
- for ; room < 0; room = p.evict.roomLeft(cost) {
- // Fill up empty slots in sample.
- sample = p.evict.fillSample(sample)
-
- // Find minimally used item in sample.
- minKey, minHits, minID, minCost := uint64(0), int64(math.MaxInt64), 0, int64(0)
- for i, pair := range sample {
- // Look up hit count for sample key.
- if hits := p.admit.Estimate(pair.key); hits < minHits {
- minKey, minHits, minID, minCost = pair.key, hits, i, pair.cost
- }
- }
-
- // If the incoming item isn't worth keeping in the policy, reject.
- if incHits < minHits {
- p.metrics.add(rejectSets, key, 1)
- return victims, false
- }
-
- // Delete the victim from metadata.
- p.evict.del(minKey)
-
- // Delete the victim from sample.
- sample[minID] = sample[len(sample)-1]
- sample = sample[:len(sample)-1]
- // Store victim in evicted victims slice.
- victims = append(victims, &Item{
- Key: minKey,
- Conflict: 0,
- Cost: minCost,
- })
- }
-
- p.evict.add(key, cost)
- p.metrics.add(costAdd, key, uint64(cost))
- return victims, true
-}
-
-func (p *defaultPolicy) Has(key uint64) bool {
- p.Lock()
- _, exists := p.evict.keyCosts[key]
- p.Unlock()
- return exists
-}
-
-func (p *defaultPolicy) Del(key uint64) {
- p.Lock()
- p.evict.del(key)
- p.Unlock()
-}
-
-func (p *defaultPolicy) Used() int64 {
- p.Lock()
- used := p.evict.used
- p.Unlock()
- return used
-}
-
-func (p *defaultPolicy) Update(key uint64, cost int64) {
- p.Lock()
- p.evict.updateIfHas(key, cost)
- p.Unlock()
-}
-
-func (p *defaultPolicy) Cost(key uint64) int64 {
- p.Lock()
- if cost, found := p.evict.keyCosts[key]; found {
- p.Unlock()
- return cost
- }
- p.Unlock()
- return -1
-}
-
-func (p *defaultPolicy) Clear() {
- p.Lock()
- p.admit = newTinyLFU(p.numCounters)
- p.evict = newSampledLFU(p.maxCost)
- p.Unlock()
-}
-
-func (p *defaultPolicy) Close() {
- if p.isClosed {
- return
- }
-
- // Block until the p.processItems goroutine returns.
- p.stop <- struct{}{}
- close(p.stop)
- close(p.itemsCh)
- p.isClosed = true
-}
-
-func (p *defaultPolicy) MaxCost() int64 {
- if p == nil || p.evict == nil {
- return 0
- }
- return p.evict.getMaxCost()
-}
-
-func (p *defaultPolicy) UpdateMaxCost(maxCost int64) {
- if p == nil || p.evict == nil {
- return
- }
- p.evict.updateMaxCost(maxCost)
-}
-
-// sampledLFU is an eviction helper storing key-cost pairs.
-type sampledLFU struct {
- keyCosts map[uint64]int64
- maxCost int64
- used int64
- metrics *Metrics
-}
-
-func newSampledLFU(maxCost int64) *sampledLFU {
- return &sampledLFU{
- keyCosts: make(map[uint64]int64),
- maxCost: maxCost,
- }
-}
-
-func (p *sampledLFU) getMaxCost() int64 {
- return atomic.LoadInt64(&p.maxCost)
-}
-
-func (p *sampledLFU) updateMaxCost(maxCost int64) {
- atomic.StoreInt64(&p.maxCost, maxCost)
-}
-
-func (p *sampledLFU) roomLeft(cost int64) int64 {
- return p.getMaxCost() - (p.used + cost)
-}
-
-func (p *sampledLFU) fillSample(in []*policyPair) []*policyPair {
- if len(in) >= lfuSample {
- return in
- }
- for key, cost := range p.keyCosts {
- in = append(in, &policyPair{key, cost})
- if len(in) >= lfuSample {
- return in
- }
- }
- return in
-}
-
-func (p *sampledLFU) del(key uint64) {
- cost, ok := p.keyCosts[key]
- if !ok {
- return
- }
- p.used -= cost
- delete(p.keyCosts, key)
- p.metrics.add(costEvict, key, uint64(cost))
- p.metrics.add(keyEvict, key, 1)
-}
-
-func (p *sampledLFU) add(key uint64, cost int64) {
- p.keyCosts[key] = cost
- p.used += cost
-}
-
-func (p *sampledLFU) updateIfHas(key uint64, cost int64) bool {
- if prev, found := p.keyCosts[key]; found {
- // Update the cost of an existing key, but don't worry about evicting.
- // Evictions will be handled the next time a new item is added.
- p.metrics.add(keyUpdate, key, 1)
- if prev > cost {
- diff := prev - cost
- p.metrics.add(costAdd, key, ^uint64(uint64(diff)-1))
- } else if cost > prev {
- diff := cost - prev
- p.metrics.add(costAdd, key, uint64(diff))
- }
- p.used += cost - prev
- p.keyCosts[key] = cost
- return true
- }
- return false
-}
-
-func (p *sampledLFU) clear() {
- p.used = 0
- p.keyCosts = make(map[uint64]int64)
-}
-
-// tinyLFU is an admission helper that keeps track of access frequency using
-// tiny (4-bit) counters in the form of a count-min sketch.
-// tinyLFU is NOT thread safe.
-type tinyLFU struct {
- freq *cmSketch
- door *bloom.Bloom
- incrs int64
- resetAt int64
-}
-
-func newTinyLFU(numCounters int64) *tinyLFU {
- return &tinyLFU{
- freq: newCmSketch(numCounters),
- door: bloom.NewBloomFilterWithErrorRate(uint64(numCounters), 0.01),
- resetAt: numCounters,
- }
-}
-
-func (p *tinyLFU) Push(keys []uint64) {
- for _, key := range keys {
- p.Increment(key)
- }
-}
-
-func (p *tinyLFU) Estimate(key uint64) int64 {
- hits := p.freq.Estimate(key)
- if p.door.Has(key) {
- hits++
- }
- return hits
-}
-
-func (p *tinyLFU) Increment(key uint64) {
- // Flip doorkeeper bit if not already done.
- if added := p.door.AddIfNotHas(key); !added {
- // Increment count-min counter if doorkeeper bit is already set.
- p.freq.Increment(key)
- }
- p.incrs++
- if p.incrs >= p.resetAt {
- p.reset()
- }
-}
-
-func (p *tinyLFU) reset() {
- // Zero out incrs.
- p.incrs = 0
- // clears doorkeeper bits
- p.door.Clear()
- // halves count-min counters
- p.freq.Reset()
-}
-
-func (p *tinyLFU) clear() {
- p.incrs = 0
- p.freq.Clear()
- p.door.Clear()
-}
diff --git a/go/cache/ristretto/policy_test.go b/go/cache/ristretto/policy_test.go
deleted file mode 100644
index c864b6c74d0..00000000000
--- a/go/cache/ristretto/policy_test.go
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- * Copyright 2020 Dgraph Labs, Inc. and Contributors
- * Copyright 2021 The Vitess Authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package ristretto
-
-import (
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-)
-
-func TestPolicy(t *testing.T) {
- defer func() {
- require.Nil(t, recover())
- }()
- newPolicy(100, 10)
-}
-
-func TestPolicyMetrics(t *testing.T) {
- p := newDefaultPolicy(100, 10)
- p.CollectMetrics(newMetrics())
- require.NotNil(t, p.metrics)
- require.NotNil(t, p.evict.metrics)
-}
-
-func TestPolicyProcessItems(t *testing.T) {
- p := newDefaultPolicy(100, 10)
- p.itemsCh <- []uint64{1, 2, 2}
- time.Sleep(wait)
- p.Lock()
- require.Equal(t, int64(2), p.admit.Estimate(2))
- require.Equal(t, int64(1), p.admit.Estimate(1))
- p.Unlock()
-
- p.stop <- struct{}{}
- p.itemsCh <- []uint64{3, 3, 3}
- time.Sleep(wait)
- p.Lock()
- require.Equal(t, int64(0), p.admit.Estimate(3))
- p.Unlock()
-}
-
-func TestPolicyPush(t *testing.T) {
- p := newDefaultPolicy(100, 10)
- require.True(t, p.Push([]uint64{}))
-
- keepCount := 0
- for i := 0; i < 10; i++ {
- if p.Push([]uint64{1, 2, 3, 4, 5}) {
- keepCount++
- }
- }
- require.NotEqual(t, 0, keepCount)
-}
-
-func TestPolicyAdd(t *testing.T) {
- p := newDefaultPolicy(1000, 100)
- if victims, added := p.Add(1, 101); victims != nil || added {
- t.Fatal("can't add an item bigger than entire cache")
- }
- p.Lock()
- p.evict.add(1, 1)
- p.admit.Increment(1)
- p.admit.Increment(2)
- p.admit.Increment(3)
- p.Unlock()
-
- victims, added := p.Add(1, 1)
- require.Nil(t, victims)
- require.False(t, added)
-
- victims, added = p.Add(2, 20)
- require.Nil(t, victims)
- require.True(t, added)
-
- victims, added = p.Add(3, 90)
- require.NotNil(t, victims)
- require.True(t, added)
-
- victims, added = p.Add(4, 20)
- require.NotNil(t, victims)
- require.False(t, added)
-}
-
-func TestPolicyHas(t *testing.T) {
- p := newDefaultPolicy(100, 10)
- p.Add(1, 1)
- require.True(t, p.Has(1))
- require.False(t, p.Has(2))
-}
-
-func TestPolicyDel(t *testing.T) {
- p := newDefaultPolicy(100, 10)
- p.Add(1, 1)
- p.Del(1)
- p.Del(2)
- require.False(t, p.Has(1))
- require.False(t, p.Has(2))
-}
-
-func TestPolicyCap(t *testing.T) {
- p := newDefaultPolicy(100, 10)
- p.Add(1, 1)
- require.Equal(t, int64(9), p.MaxCost()-p.Used())
-}
-
-func TestPolicyUpdate(t *testing.T) {
- p := newDefaultPolicy(100, 10)
- p.Add(1, 1)
- p.Update(1, 2)
- p.Lock()
- require.Equal(t, int64(2), p.evict.keyCosts[1])
- p.Unlock()
-}
-
-func TestPolicyCost(t *testing.T) {
- p := newDefaultPolicy(100, 10)
- p.Add(1, 2)
- require.Equal(t, int64(2), p.Cost(1))
- require.Equal(t, int64(-1), p.Cost(2))
-}
-
-func TestPolicyClear(t *testing.T) {
- p := newDefaultPolicy(100, 10)
- p.Add(1, 1)
- p.Add(2, 2)
- p.Add(3, 3)
- p.Clear()
- require.Equal(t, int64(10), p.MaxCost()-p.Used())
- require.False(t, p.Has(1))
- require.False(t, p.Has(2))
- require.False(t, p.Has(3))
-}
-
-func TestPolicyClose(t *testing.T) {
- defer func() {
- require.NotNil(t, recover())
- }()
-
- p := newDefaultPolicy(100, 10)
- p.Add(1, 1)
- p.Close()
- p.itemsCh <- []uint64{1}
-}
-
-func TestPushAfterClose(t *testing.T) {
- p := newDefaultPolicy(100, 10)
- p.Close()
- require.False(t, p.Push([]uint64{1, 2}))
-}
-
-func TestAddAfterClose(t *testing.T) {
- p := newDefaultPolicy(100, 10)
- p.Close()
- p.Add(1, 1)
-}
-
-func TestSampledLFUAdd(t *testing.T) {
- e := newSampledLFU(4)
- e.add(1, 1)
- e.add(2, 2)
- e.add(3, 1)
- require.Equal(t, int64(4), e.used)
- require.Equal(t, int64(2), e.keyCosts[2])
-}
-
-func TestSampledLFUDel(t *testing.T) {
- e := newSampledLFU(4)
- e.add(1, 1)
- e.add(2, 2)
- e.del(2)
- require.Equal(t, int64(1), e.used)
- _, ok := e.keyCosts[2]
- require.False(t, ok)
- e.del(4)
-}
-
-func TestSampledLFUUpdate(t *testing.T) {
- e := newSampledLFU(4)
- e.add(1, 1)
- require.True(t, e.updateIfHas(1, 2))
- require.Equal(t, int64(2), e.used)
- require.False(t, e.updateIfHas(2, 2))
-}
-
-func TestSampledLFUClear(t *testing.T) {
- e := newSampledLFU(4)
- e.add(1, 1)
- e.add(2, 2)
- e.add(3, 1)
- e.clear()
- require.Equal(t, 0, len(e.keyCosts))
- require.Equal(t, int64(0), e.used)
-}
-
-func TestSampledLFURoom(t *testing.T) {
- e := newSampledLFU(16)
- e.add(1, 1)
- e.add(2, 2)
- e.add(3, 3)
- require.Equal(t, int64(6), e.roomLeft(4))
-}
-
-func TestSampledLFUSample(t *testing.T) {
- e := newSampledLFU(16)
- e.add(4, 4)
- e.add(5, 5)
- sample := e.fillSample([]*policyPair{
- {1, 1},
- {2, 2},
- {3, 3},
- })
- k := sample[len(sample)-1].key
- require.Equal(t, 5, len(sample))
- require.NotEqual(t, 1, k)
- require.NotEqual(t, 2, k)
- require.NotEqual(t, 3, k)
- require.Equal(t, len(sample), len(e.fillSample(sample)))
- e.del(5)
- sample = e.fillSample(sample[:len(sample)-2])
- require.Equal(t, 4, len(sample))
-}
-
-func TestTinyLFUIncrement(t *testing.T) {
- a := newTinyLFU(4)
- a.Increment(1)
- a.Increment(1)
- a.Increment(1)
- require.True(t, a.door.Has(1))
- require.Equal(t, int64(2), a.freq.Estimate(1))
-
- a.Increment(1)
- require.False(t, a.door.Has(1))
- require.Equal(t, int64(1), a.freq.Estimate(1))
-}
-
-func TestTinyLFUEstimate(t *testing.T) {
- a := newTinyLFU(8)
- a.Increment(1)
- a.Increment(1)
- a.Increment(1)
- require.Equal(t, int64(3), a.Estimate(1))
- require.Equal(t, int64(0), a.Estimate(2))
-}
-
-func TestTinyLFUPush(t *testing.T) {
- a := newTinyLFU(16)
- a.Push([]uint64{1, 2, 2, 3, 3, 3})
- require.Equal(t, int64(1), a.Estimate(1))
- require.Equal(t, int64(2), a.Estimate(2))
- require.Equal(t, int64(3), a.Estimate(3))
- require.Equal(t, int64(6), a.incrs)
-}
-
-func TestTinyLFUClear(t *testing.T) {
- a := newTinyLFU(16)
- a.Push([]uint64{1, 3, 3, 3})
- a.clear()
- require.Equal(t, int64(0), a.incrs)
- require.Equal(t, int64(0), a.Estimate(3))
-}
diff --git a/go/cache/ristretto/ring.go b/go/cache/ristretto/ring.go
deleted file mode 100644
index 84d8689ee37..00000000000
--- a/go/cache/ristretto/ring.go
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- * Copyright 2021 The Vitess Authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package ristretto
-
-import (
- "sync"
-)
-
-// ringConsumer is the user-defined object responsible for receiving and
-// processing items in batches when buffers are drained.
-type ringConsumer interface {
- Push([]uint64) bool
-}
-
-// ringStripe is a singular ring buffer that is not concurrent safe.
-type ringStripe struct {
- cons ringConsumer
- data []uint64
- capa int
-}
-
-func newRingStripe(cons ringConsumer, capa int64) *ringStripe {
- return &ringStripe{
- cons: cons,
- data: make([]uint64, 0, capa),
- capa: int(capa),
- }
-}
-
-// Push appends an item in the ring buffer and drains (copies items and
-// sends to Consumer) if full.
-func (s *ringStripe) Push(item uint64) {
- s.data = append(s.data, item)
- // Decide if the ring buffer should be drained.
- if len(s.data) >= s.capa {
- // Send elements to consumer and create a new ring stripe.
- if s.cons.Push(s.data) {
- s.data = make([]uint64, 0, s.capa)
- } else {
- s.data = s.data[:0]
- }
- }
-}
-
-// ringBuffer stores multiple buffers (stripes) and distributes Pushed items
-// between them to lower contention.
-//
-// This implements the "batching" process described in the BP-Wrapper paper
-// (section III part A).
-type ringBuffer struct {
- pool *sync.Pool
-}
-
-// newRingBuffer returns a striped ring buffer. The Consumer in ringConfig will
-// be called when individual stripes are full and need to drain their elements.
-func newRingBuffer(cons ringConsumer, capa int64) *ringBuffer {
- // LOSSY buffers use a very simple sync.Pool for concurrently reusing
- // stripes. We do lose some stripes due to GC (unheld items in sync.Pool
- // are cleared), but the performance gains generally outweigh the small
- // percentage of elements lost. The performance primarily comes from
- // low-level runtime functions used in the standard library that aren't
- // available to us (such as runtime_procPin()).
- return &ringBuffer{
- pool: &sync.Pool{
- New: func() any { return newRingStripe(cons, capa) },
- },
- }
-}
-
-// Push adds an element to one of the internal stripes and possibly drains if
-// the stripe becomes full.
-func (b *ringBuffer) Push(item uint64) {
- // Reuse or create a new stripe.
- stripe := b.pool.Get().(*ringStripe)
- stripe.Push(item)
- b.pool.Put(stripe)
-}
diff --git a/go/cache/ristretto/ring_test.go b/go/cache/ristretto/ring_test.go
deleted file mode 100644
index 0dbe962ccc6..00000000000
--- a/go/cache/ristretto/ring_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright 2020 Dgraph Labs, Inc. and Contributors
- * Copyright 2021 The Vitess Authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package ristretto
-
-import (
- "sync"
- "testing"
-
- "github.com/stretchr/testify/require"
-)
-
-type testConsumer struct {
- push func([]uint64)
- save bool
-}
-
-func (c *testConsumer) Push(items []uint64) bool {
- if c.save {
- c.push(items)
- return true
- }
- return false
-}
-
-func TestRingDrain(t *testing.T) {
- drains := 0
- r := newRingBuffer(&testConsumer{
- push: func(items []uint64) {
- drains++
- },
- save: true,
- }, 1)
- for i := 0; i < 100; i++ {
- r.Push(uint64(i))
- }
- require.Equal(t, 100, drains, "buffers shouldn't be dropped with BufferItems == 1")
-}
-
-func TestRingReset(t *testing.T) {
- drains := 0
- r := newRingBuffer(&testConsumer{
- push: func(items []uint64) {
- drains++
- },
- save: false,
- }, 4)
- for i := 0; i < 100; i++ {
- r.Push(uint64(i))
- }
- require.Equal(t, 0, drains, "testConsumer shouldn't be draining")
-}
-
-func TestRingConsumer(t *testing.T) {
- mu := &sync.Mutex{}
- drainItems := make(map[uint64]struct{})
- r := newRingBuffer(&testConsumer{
- push: func(items []uint64) {
- mu.Lock()
- defer mu.Unlock()
- for i := range items {
- drainItems[items[i]] = struct{}{}
- }
- },
- save: true,
- }, 4)
- for i := 0; i < 100; i++ {
- r.Push(uint64(i))
- }
- l := len(drainItems)
- require.NotEqual(t, 0, l)
- require.True(t, l <= 100)
-}
diff --git a/go/cache/ristretto/sketch.go b/go/cache/ristretto/sketch.go
deleted file mode 100644
index ce0504a2a83..00000000000
--- a/go/cache/ristretto/sketch.go
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- * Copyright 2021 The Vitess Authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-// Package ristretto includes multiple probabalistic data structures needed for
-// admission/eviction metadata. Most are Counting Bloom Filter variations, but
-// a caching-specific feature that is also required is a "freshness" mechanism,
-// which basically serves as a "lifetime" process. This freshness mechanism
-// was described in the original TinyLFU paper [1], but other mechanisms may
-// be better suited for certain data distributions.
-//
-// [1]: https://arxiv.org/abs/1512.00727
-package ristretto
-
-import (
- "fmt"
- "math/rand"
- "time"
-)
-
-// cmSketch is a Count-Min sketch implementation with 4-bit counters, heavily
-// based on Damian Gryski's CM4 [1].
-//
-// [1]: https://github.com/dgryski/go-tinylfu/blob/master/cm4.go
-type cmSketch struct {
- rows [cmDepth]cmRow
- seed [cmDepth]uint64
- mask uint64
-}
-
-const (
- // cmDepth is the number of counter copies to store (think of it as rows).
- cmDepth = 4
-)
-
-func newCmSketch(numCounters int64) *cmSketch {
- if numCounters == 0 {
- panic("cmSketch: bad numCounters")
- }
- // Get the next power of 2 for better cache performance.
- numCounters = next2Power(numCounters)
- sketch := &cmSketch{mask: uint64(numCounters - 1)}
- // Initialize rows of counters and seeds.
- source := rand.New(rand.NewSource(time.Now().UnixNano()))
- for i := 0; i < cmDepth; i++ {
- sketch.seed[i] = source.Uint64()
- sketch.rows[i] = newCmRow(numCounters)
- }
- return sketch
-}
-
-// Increment increments the count(ers) for the specified key.
-func (s *cmSketch) Increment(hashed uint64) {
- for i := range s.rows {
- s.rows[i].increment((hashed ^ s.seed[i]) & s.mask)
- }
-}
-
-// Estimate returns the value of the specified key.
-func (s *cmSketch) Estimate(hashed uint64) int64 {
- min := byte(255)
- for i := range s.rows {
- val := s.rows[i].get((hashed ^ s.seed[i]) & s.mask)
- if val < min {
- min = val
- }
- }
- return int64(min)
-}
-
-// Reset halves all counter values.
-func (s *cmSketch) Reset() {
- for _, r := range s.rows {
- r.reset()
- }
-}
-
-// Clear zeroes all counters.
-func (s *cmSketch) Clear() {
- for _, r := range s.rows {
- r.clear()
- }
-}
-
-// cmRow is a row of bytes, with each byte holding two counters.
-type cmRow []byte
-
-func newCmRow(numCounters int64) cmRow {
- return make(cmRow, numCounters/2)
-}
-
-func (r cmRow) get(n uint64) byte {
- return byte(r[n/2]>>((n&1)*4)) & 0x0f
-}
-
-func (r cmRow) increment(n uint64) {
- // Index of the counter.
- i := n / 2
- // Shift distance (even 0, odd 4).
- s := (n & 1) * 4
- // Counter value.
- v := (r[i] >> s) & 0x0f
- // Only increment if not max value (overflow wrap is bad for LFU).
- if v < 15 {
- r[i] += 1 << s
- }
-}
-
-func (r cmRow) reset() {
- // Halve each counter.
- for i := range r {
- r[i] = (r[i] >> 1) & 0x77
- }
-}
-
-func (r cmRow) clear() {
- // Zero each counter.
- for i := range r {
- r[i] = 0
- }
-}
-
-func (r cmRow) string() string {
- s := ""
- for i := uint64(0); i < uint64(len(r)*2); i++ {
- s += fmt.Sprintf("%02d ", (r[(i/2)]>>((i&1)*4))&0x0f)
- }
- s = s[:len(s)-1]
- return s
-}
-
-// next2Power rounds x up to the next power of 2, if it's not already one.
-func next2Power(x int64) int64 {
- x--
- x |= x >> 1
- x |= x >> 2
- x |= x >> 4
- x |= x >> 8
- x |= x >> 16
- x |= x >> 32
- x++
- return x
-}
diff --git a/go/cache/ristretto/sketch_test.go b/go/cache/ristretto/sketch_test.go
deleted file mode 100644
index 03804a6d599..00000000000
--- a/go/cache/ristretto/sketch_test.go
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright 2020 Dgraph Labs, Inc. and Contributors
- * Copyright 2021 The Vitess Authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package ristretto
-
-import (
- "testing"
-
- "vitess.io/vitess/go/vt/log"
-
- "github.com/stretchr/testify/require"
-)
-
-func TestSketch(t *testing.T) {
- defer func() {
- require.NotNil(t, recover())
- }()
-
- s := newCmSketch(5)
- require.Equal(t, uint64(7), s.mask)
- newCmSketch(0)
-}
-
-func TestSketchIncrement(t *testing.T) {
- s := newCmSketch(16)
- s.Increment(1)
- s.Increment(5)
- s.Increment(9)
- for i := 0; i < cmDepth; i++ {
- if s.rows[i].string() != s.rows[0].string() {
- break
- }
- require.False(t, i == cmDepth-1, "identical rows, bad seeding")
- }
-}
-
-func TestSketchEstimate(t *testing.T) {
- s := newCmSketch(16)
- s.Increment(1)
- s.Increment(1)
- require.Equal(t, int64(2), s.Estimate(1))
- require.Equal(t, int64(0), s.Estimate(0))
-}
-
-func TestSketchReset(t *testing.T) {
- s := newCmSketch(16)
- s.Increment(1)
- s.Increment(1)
- s.Increment(1)
- s.Increment(1)
- s.Reset()
- require.Equal(t, int64(2), s.Estimate(1))
-}
-
-func TestSketchClear(t *testing.T) {
- s := newCmSketch(16)
- for i := 0; i < 16; i++ {
- s.Increment(uint64(i))
- }
- s.Clear()
- for i := 0; i < 16; i++ {
- require.Equal(t, int64(0), s.Estimate(uint64(i)))
- }
-}
-
-func TestNext2Power(t *testing.T) {
- sz := 12 << 30
- szf := float64(sz) * 0.01
- val := int64(szf)
- log.Infof("szf = %.2f val = %d\n", szf, val)
- pow := next2Power(val)
- log.Infof("pow = %d. mult 4 = %d\n", pow, pow*4)
-}
-
-func BenchmarkSketchIncrement(b *testing.B) {
- s := newCmSketch(16)
- b.SetBytes(1)
- for n := 0; n < b.N; n++ {
- s.Increment(1)
- }
-}
-
-func BenchmarkSketchEstimate(b *testing.B) {
- s := newCmSketch(16)
- s.Increment(1)
- b.SetBytes(1)
- for n := 0; n < b.N; n++ {
- s.Estimate(1)
- }
-}
diff --git a/go/cache/ristretto/store.go b/go/cache/ristretto/store.go
deleted file mode 100644
index 0e455e7052f..00000000000
--- a/go/cache/ristretto/store.go
+++ /dev/null
@@ -1,240 +0,0 @@
-/*
- * Copyright 2019 Dgraph Labs, Inc. and Contributors
- * Copyright 2021 The Vitess Authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package ristretto
-
-import (
- "sync"
-)
-
-// TODO: Do we need this to be a separate struct from Item?
-type storeItem struct {
- key uint64
- conflict uint64
- value any
-}
-
-// store is the interface fulfilled by all hash map implementations in this
-// file. Some hash map implementations are better suited for certain data
-// distributions than others, so this allows us to abstract that out for use
-// in Ristretto.
-//
-// Every store is safe for concurrent usage.
-type store interface {
- // Get returns the value associated with the key parameter.
- Get(uint64, uint64) (any, bool)
- // Set adds the key-value pair to the Map or updates the value if it's
- // already present. The key-value pair is passed as a pointer to an
- // item object.
- Set(*Item)
- // Del deletes the key-value pair from the Map.
- Del(uint64, uint64) (uint64, any)
- // Update attempts to update the key with a new value and returns true if
- // successful.
- Update(*Item) (any, bool)
- // Clear clears all contents of the store.
- Clear(onEvict itemCallback)
- // ForEach yields all the values in the store
- ForEach(forEach func(any) bool)
- // Len returns the number of entries in the store
- Len() int
-}
-
-// newStore returns the default store implementation.
-func newStore() store {
- return newShardedMap()
-}
-
-const numShards uint64 = 256
-
-type shardedMap struct {
- shards []*lockedMap
-}
-
-func newShardedMap() *shardedMap {
- sm := &shardedMap{
- shards: make([]*lockedMap, int(numShards)),
- }
- for i := range sm.shards {
- sm.shards[i] = newLockedMap()
- }
- return sm
-}
-
-func (sm *shardedMap) Get(key, conflict uint64) (any, bool) {
- return sm.shards[key%numShards].get(key, conflict)
-}
-
-func (sm *shardedMap) Set(i *Item) {
- if i == nil {
- // If item is nil make this Set a no-op.
- return
- }
-
- sm.shards[i.Key%numShards].Set(i)
-}
-
-func (sm *shardedMap) Del(key, conflict uint64) (uint64, any) {
- return sm.shards[key%numShards].Del(key, conflict)
-}
-
-func (sm *shardedMap) Update(newItem *Item) (any, bool) {
- return sm.shards[newItem.Key%numShards].Update(newItem)
-}
-
-func (sm *shardedMap) ForEach(forEach func(any) bool) {
- for _, shard := range sm.shards {
- if !shard.foreach(forEach) {
- break
- }
- }
-}
-
-func (sm *shardedMap) Len() int {
- l := 0
- for _, shard := range sm.shards {
- l += shard.Len()
- }
- return l
-}
-
-func (sm *shardedMap) Clear(onEvict itemCallback) {
- for i := uint64(0); i < numShards; i++ {
- sm.shards[i].Clear(onEvict)
- }
-}
-
-type lockedMap struct {
- sync.RWMutex
- data map[uint64]storeItem
-}
-
-func newLockedMap() *lockedMap {
- return &lockedMap{
- data: make(map[uint64]storeItem),
- }
-}
-
-func (m *lockedMap) get(key, conflict uint64) (any, bool) {
- m.RLock()
- item, ok := m.data[key]
- m.RUnlock()
- if !ok {
- return nil, false
- }
- if conflict != 0 && (conflict != item.conflict) {
- return nil, false
- }
- return item.value, true
-}
-
-func (m *lockedMap) Set(i *Item) {
- if i == nil {
- // If the item is nil make this Set a no-op.
- return
- }
-
- m.Lock()
- defer m.Unlock()
- item, ok := m.data[i.Key]
-
- if ok {
- // The item existed already. We need to check the conflict key and reject the
- // update if they do not match. Only after that the expiration map is updated.
- if i.Conflict != 0 && (i.Conflict != item.conflict) {
- return
- }
- }
-
- m.data[i.Key] = storeItem{
- key: i.Key,
- conflict: i.Conflict,
- value: i.Value,
- }
-}
-
-func (m *lockedMap) Del(key, conflict uint64) (uint64, any) {
- m.Lock()
- item, ok := m.data[key]
- if !ok {
- m.Unlock()
- return 0, nil
- }
- if conflict != 0 && (conflict != item.conflict) {
- m.Unlock()
- return 0, nil
- }
-
- delete(m.data, key)
- m.Unlock()
- return item.conflict, item.value
-}
-
-func (m *lockedMap) Update(newItem *Item) (any, bool) {
- m.Lock()
- item, ok := m.data[newItem.Key]
- if !ok {
- m.Unlock()
- return nil, false
- }
- if newItem.Conflict != 0 && (newItem.Conflict != item.conflict) {
- m.Unlock()
- return nil, false
- }
-
- m.data[newItem.Key] = storeItem{
- key: newItem.Key,
- conflict: newItem.Conflict,
- value: newItem.Value,
- }
-
- m.Unlock()
- return item.value, true
-}
-
-func (m *lockedMap) Len() int {
- m.RLock()
- l := len(m.data)
- m.RUnlock()
- return l
-}
-
-func (m *lockedMap) Clear(onEvict itemCallback) {
- m.Lock()
- i := &Item{}
- if onEvict != nil {
- for _, si := range m.data {
- i.Key = si.key
- i.Conflict = si.conflict
- i.Value = si.value
- onEvict(i)
- }
- }
- m.data = make(map[uint64]storeItem)
- m.Unlock()
-}
-
-func (m *lockedMap) foreach(forEach func(any) bool) bool {
- m.RLock()
- defer m.RUnlock()
- for _, si := range m.data {
- if !forEach(si.value) {
- return false
- }
- }
- return true
-}
diff --git a/go/cache/ristretto/store_test.go b/go/cache/ristretto/store_test.go
deleted file mode 100644
index 54634736a72..00000000000
--- a/go/cache/ristretto/store_test.go
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * Copyright 2020 Dgraph Labs, Inc. and Contributors
- * Copyright 2021 The Vitess Authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package ristretto
-
-import (
- "strconv"
- "testing"
-
- "github.com/stretchr/testify/require"
-)
-
-func TestStoreSetGet(t *testing.T) {
- s := newStore()
- key, conflict := defaultStringHash("1")
- i := Item{
- Key: key,
- Conflict: conflict,
- Value: 2,
- }
- s.Set(&i)
- val, ok := s.Get(key, conflict)
- require.True(t, ok)
- require.Equal(t, 2, val.(int))
-
- i.Value = 3
- s.Set(&i)
- val, ok = s.Get(key, conflict)
- require.True(t, ok)
- require.Equal(t, 3, val.(int))
-
- key, conflict = defaultStringHash("2")
- i = Item{
- Key: key,
- Conflict: conflict,
- Value: 2,
- }
- s.Set(&i)
- val, ok = s.Get(key, conflict)
- require.True(t, ok)
- require.Equal(t, 2, val.(int))
-}
-
-func TestStoreDel(t *testing.T) {
- s := newStore()
- key, conflict := defaultStringHash("1")
- i := Item{
- Key: key,
- Conflict: conflict,
- Value: 1,
- }
- s.Set(&i)
- s.Del(key, conflict)
- val, ok := s.Get(key, conflict)
- require.False(t, ok)
- require.Nil(t, val)
-
- s.Del(2, 0)
-}
-
-func TestStoreClear(t *testing.T) {
- s := newStore()
- for i := 0; i < 1000; i++ {
- key, conflict := defaultStringHash(strconv.Itoa(i))
- it := Item{
- Key: key,
- Conflict: conflict,
- Value: i,
- }
- s.Set(&it)
- }
- s.Clear(nil)
- for i := 0; i < 1000; i++ {
- key, conflict := defaultStringHash(strconv.Itoa(i))
- val, ok := s.Get(key, conflict)
- require.False(t, ok)
- require.Nil(t, val)
- }
-}
-
-func TestStoreUpdate(t *testing.T) {
- s := newStore()
- key, conflict := defaultStringHash("1")
- i := Item{
- Key: key,
- Conflict: conflict,
- Value: 1,
- }
- s.Set(&i)
- i.Value = 2
- _, ok := s.Update(&i)
- require.True(t, ok)
-
- val, ok := s.Get(key, conflict)
- require.True(t, ok)
- require.NotNil(t, val)
-
- val, ok = s.Get(key, conflict)
- require.True(t, ok)
- require.Equal(t, 2, val.(int))
-
- i.Value = 3
- _, ok = s.Update(&i)
- require.True(t, ok)
-
- val, ok = s.Get(key, conflict)
- require.True(t, ok)
- require.Equal(t, 3, val.(int))
-
- key, conflict = defaultStringHash("2")
- i = Item{
- Key: key,
- Conflict: conflict,
- Value: 2,
- }
- _, ok = s.Update(&i)
- require.False(t, ok)
- val, ok = s.Get(key, conflict)
- require.False(t, ok)
- require.Nil(t, val)
-}
-
-func TestStoreCollision(t *testing.T) {
- s := newShardedMap()
- s.shards[1].Lock()
- s.shards[1].data[1] = storeItem{
- key: 1,
- conflict: 0,
- value: 1,
- }
- s.shards[1].Unlock()
- val, ok := s.Get(1, 1)
- require.False(t, ok)
- require.Nil(t, val)
-
- i := Item{
- Key: 1,
- Conflict: 1,
- Value: 2,
- }
- s.Set(&i)
- val, ok = s.Get(1, 0)
- require.True(t, ok)
- require.NotEqual(t, 2, val.(int))
-
- _, ok = s.Update(&i)
- require.False(t, ok)
- val, ok = s.Get(1, 0)
- require.True(t, ok)
- require.NotEqual(t, 2, val.(int))
-
- s.Del(1, 1)
- val, ok = s.Get(1, 0)
- require.True(t, ok)
- require.NotNil(t, val)
-}
-
-func BenchmarkStoreGet(b *testing.B) {
- s := newStore()
- key, conflict := defaultStringHash("1")
- i := Item{
- Key: key,
- Conflict: conflict,
- Value: 1,
- }
- s.Set(&i)
- b.SetBytes(1)
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- s.Get(key, conflict)
- }
- })
-}
-
-func BenchmarkStoreSet(b *testing.B) {
- s := newStore()
- key, conflict := defaultStringHash("1")
- b.SetBytes(1)
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- i := Item{
- Key: key,
- Conflict: conflict,
- Value: 1,
- }
- s.Set(&i)
- }
- })
-}
-
-func BenchmarkStoreUpdate(b *testing.B) {
- s := newStore()
- key, conflict := defaultStringHash("1")
- i := Item{
- Key: key,
- Conflict: conflict,
- Value: 1,
- }
- s.Set(&i)
- b.SetBytes(1)
- b.RunParallel(func(pb *testing.PB) {
- for pb.Next() {
- s.Update(&Item{
- Key: key,
- Conflict: conflict,
- Value: 2,
- })
- }
- })
-}
diff --git a/go/cache/theine/LICENSE b/go/cache/theine/LICENSE
new file mode 100644
index 00000000000..0161260b7b6
--- /dev/null
+++ b/go/cache/theine/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 Yiling-J
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/go/cache/theine/bf/bf.go b/go/cache/theine/bf/bf.go
new file mode 100644
index 00000000000..f68e34d81e3
--- /dev/null
+++ b/go/cache/theine/bf/bf.go
@@ -0,0 +1,116 @@
+package bf
+
+import (
+ "math"
+)
+
+// doorkeeper is a small bloom-filter-based cache admission policy
+type Bloomfilter struct {
+ Filter bitvector // our filter bit vector
+ M uint32 // size of bit vector in bits
+ K uint32 // distinct hash functions needed
+ FalsePositiveRate float64
+ Capacity int
+}
+
+func New(falsePositiveRate float64) *Bloomfilter {
+ d := &Bloomfilter{FalsePositiveRate: falsePositiveRate}
+ d.EnsureCapacity(320)
+ return d
+}
+
+// create new bloomfilter with given size in bytes
+func NewWithSize(size uint32) *Bloomfilter {
+ d := &Bloomfilter{}
+ bits := size * 8
+ m := nextPowerOfTwo(uint32(bits))
+ d.M = m
+ d.Filter = newbv(m)
+ return d
+}
+
+func (d *Bloomfilter) EnsureCapacity(capacity int) {
+ if capacity <= d.Capacity {
+ return
+ }
+ capacity = int(nextPowerOfTwo(uint32(capacity)))
+ bits := float64(capacity) * -math.Log(d.FalsePositiveRate) / (math.Log(2.0) * math.Log(2.0)) // in bits
+ m := nextPowerOfTwo(uint32(bits))
+
+ if m < 1024 {
+ m = 1024
+ }
+
+ k := uint32(0.7 * float64(m) / float64(capacity))
+ if k < 2 {
+ k = 2
+ }
+ d.Capacity = capacity
+ d.M = m
+ d.Filter = newbv(m)
+ d.K = k
+}
+
+func (d *Bloomfilter) Exist(h uint64) bool {
+ h1, h2 := uint32(h), uint32(h>>32)
+ var o uint = 1
+ for i := uint32(0); i < d.K; i++ {
+ o &= d.Filter.get((h1 + (i * h2)) & (d.M - 1))
+ }
+ return o == 1
+}
+
+// insert inserts the byte array b into the bloom filter. Returns true if the value
+// was already considered to be in the bloom filter.
+func (d *Bloomfilter) Insert(h uint64) bool {
+ h1, h2 := uint32(h), uint32(h>>32)
+ var o uint = 1
+ for i := uint32(0); i < d.K; i++ {
+ o &= d.Filter.getset((h1 + (i * h2)) & (d.M - 1))
+ }
+ return o == 1
+}
+
+// Reset clears the bloom filter
+func (d *Bloomfilter) Reset() {
+ for i := range d.Filter {
+ d.Filter[i] = 0
+ }
+}
+
+// Internal routines for the bit vector
+type bitvector []uint64
+
+func newbv(size uint32) bitvector {
+ return make([]uint64, uint(size+63)/64)
+}
+
+func (b bitvector) get(bit uint32) uint {
+ shift := bit % 64
+ idx := bit / 64
+ bb := b[idx]
+ m := uint64(1) << shift
+ return uint((bb & m) >> shift)
+}
+
+// set bit 'bit' in the bitvector d and return previous value
+func (b bitvector) getset(bit uint32) uint {
+ shift := bit % 64
+ idx := bit / 64
+ bb := b[idx]
+ m := uint64(1) << shift
+ b[idx] |= m
+ return uint((bb & m) >> shift)
+}
+
+// return the integer >= i which is a power of two
+func nextPowerOfTwo(i uint32) uint32 {
+ n := i - 1
+ n |= n >> 1
+ n |= n >> 2
+ n |= n >> 4
+ n |= n >> 8
+ n |= n >> 16
+ n++
+ return n
+}
diff --git a/go/cache/theine/bf/bf_test.go b/go/cache/theine/bf/bf_test.go
new file mode 100644
index 00000000000..f0e505766e7
--- /dev/null
+++ b/go/cache/theine/bf/bf_test.go
@@ -0,0 +1,24 @@
+package bf
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestBloom(t *testing.T) {
+ bf := NewWithSize(5)
+ bf.FalsePositiveRate = 0.1
+ bf.EnsureCapacity(5)
+ bf.EnsureCapacity(500)
+ bf.EnsureCapacity(200)
+
+ exist := bf.Insert(123)
+ require.False(t, exist)
+
+ exist = bf.Exist(123)
+ require.True(t, exist)
+
+ exist = bf.Exist(456)
+ require.False(t, exist)
+}
diff --git a/go/cache/theine/entry.go b/go/cache/theine/entry.go
new file mode 100644
index 00000000000..48e3bd5a09a
--- /dev/null
+++ b/go/cache/theine/entry.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2023 The Vitess Authors.
+Copyright 2023 Yiling-J
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package theine
+
+import "sync/atomic"
+
+const (
+ NEW int8 = iota
+ REMOVE
+ UPDATE
+)
+
+type ReadBufItem[K cachekey, V any] struct {
+ entry *Entry[K, V]
+ hash uint64
+}
+type WriteBufItem[K cachekey, V any] struct {
+ entry *Entry[K, V]
+ costChange int64
+ code int8
+}
+
+type MetaData[K cachekey, V any] struct {
+ prev *Entry[K, V]
+ next *Entry[K, V]
+}
+
+type Entry[K cachekey, V any] struct {
+ key K
+ value V
+ meta MetaData[K, V]
+ cost atomic.Int64
+ frequency atomic.Int32
+ epoch atomic.Uint32
+ removed bool
+ deque bool
+ root bool
+ list uint8 // used in slru, probation or protected
+}
+
+func NewEntry[K cachekey, V any](key K, value V, cost int64) *Entry[K, V] {
+ entry := &Entry[K, V]{
+ key: key,
+ value: value,
+ }
+ entry.cost.Store(cost)
+ return entry
+}
+
+func (e *Entry[K, V]) Next() *Entry[K, V] {
+ if p := e.meta.next; !p.root {
+ return e.meta.next
+ }
+ return nil
+}
+
+func (e *Entry[K, V]) Prev() *Entry[K, V] {
+ if p := e.meta.prev; !p.root {
+ return e.meta.prev
+ }
+ return nil
+}
+
+func (e *Entry[K, V]) prev() *Entry[K, V] {
+ return e.meta.prev
+}
+
+func (e *Entry[K, V]) next() *Entry[K, V] {
+ return e.meta.next
+}
+
+func (e *Entry[K, V]) setPrev(entry *Entry[K, V]) {
+ e.meta.prev = entry
+}
+
+func (e *Entry[K, V]) setNext(entry *Entry[K, V]) {
+ e.meta.next = entry
+}
diff --git a/go/cache/theine/list.go b/go/cache/theine/list.go
new file mode 100644
index 00000000000..19854190cba
--- /dev/null
+++ b/go/cache/theine/list.go
@@ -0,0 +1,205 @@
+/*
+Copyright 2023 The Vitess Authors.
+Copyright 2023 Yiling-J
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package theine
+
+import (
+ "fmt"
+ "strings"
+)
+
+const (
+ LIST_PROBATION uint8 = 1
+ LIST_PROTECTED uint8 = 2
+)
+
+// List represents a doubly linked list.
+// The zero value for List is an empty list ready to use.
+type List[K cachekey, V any] struct {
+ root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used
+ len int // current list length(sum of costs) excluding (this) sentinel element
+ count int // count of entries in list
+ capacity uint
+ bounded bool
+ listType uint8 // 1 tinylfu list, 2 timerwheel list
+}
+
+// New returns an initialized list.
+func NewList[K cachekey, V any](size uint, listType uint8) *List[K, V] {
+ l := &List[K, V]{listType: listType, capacity: size, root: Entry[K, V]{}}
+ l.root.root = true
+ l.root.setNext(&l.root)
+ l.root.setPrev(&l.root)
+ l.len = 0
+ l.capacity = size
+ if size > 0 {
+ l.bounded = true
+ }
+ return l
+}
+
+func (l *List[K, V]) Reset() {
+ l.root.setNext(&l.root)
+ l.root.setPrev(&l.root)
+ l.len = 0
+}
+
+// Len returns the number of elements of list l.
+// The complexity is O(1).
+func (l *List[K, V]) Len() int { return l.len }
+
+func (l *List[K, V]) display() string {
+ var s []string
+ for e := l.Front(); e != nil; e = e.Next() {
+ s = append(s, fmt.Sprintf("%v", e.key))
+ }
+ return strings.Join(s, "/")
+}
+
+func (l *List[K, V]) displayReverse() string {
+ var s []string
+ for e := l.Back(); e != nil; e = e.Prev() {
+ s = append(s, fmt.Sprintf("%v", e.key))
+ }
+ return strings.Join(s, "/")
+}
+
+// Front returns the first element of list l or nil if the list is empty.
+func (l *List[K, V]) Front() *Entry[K, V] {
+ e := l.root.next()
+ if e != &l.root {
+ return e
+ }
+ return nil
+}
+
+// Back returns the last element of list l or nil if the list is empty.
+func (l *List[K, V]) Back() *Entry[K, V] {
+ e := l.root.prev()
+ if e != &l.root {
+ return e
+ }
+ return nil
+}
+
+// insert inserts e after at, increments l.len, and evicted entry if capacity exceed
+func (l *List[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] {
+ var evicted *Entry[K, V]
+ if l.bounded && l.len >= int(l.capacity) {
+ evicted = l.PopTail()
+ }
+ e.list = l.listType
+ e.setPrev(at)
+ e.setNext(at.next())
+ e.prev().setNext(e)
+ e.next().setPrev(e)
+ if l.bounded {
+ l.len += int(e.cost.Load())
+ l.count += 1
+ }
+ return evicted
+}
+
+// PushFront push entry to list head
+func (l *List[K, V]) PushFront(e *Entry[K, V]) *Entry[K, V] {
+ return l.insert(e, &l.root)
+}
+
+// Push push entry to the back of list
+func (l *List[K, V]) PushBack(e *Entry[K, V]) *Entry[K, V] {
+ return l.insert(e, l.root.prev())
+}
+
+// remove removes e from its list, decrements l.len
+func (l *List[K, V]) remove(e *Entry[K, V]) {
+ e.prev().setNext(e.next())
+ e.next().setPrev(e.prev())
+ e.setNext(nil)
+ e.setPrev(nil)
+ e.list = 0
+ if l.bounded {
+ l.len -= int(e.cost.Load())
+ l.count -= 1
+ }
+}
+
+// move moves e to next to at.
+func (l *List[K, V]) move(e, at *Entry[K, V]) {
+ if e == at {
+ return
+ }
+ e.prev().setNext(e.next())
+ e.next().setPrev(e.prev())
+
+ e.setPrev(at)
+ e.setNext(at.next())
+ e.prev().setNext(e)
+ e.next().setPrev(e)
+}
+
+// Remove removes e from l if e is an element of list l.
+// It returns the element value e.Value.
+// The element must not be nil.
+func (l *List[K, V]) Remove(e *Entry[K, V]) {
+ l.remove(e)
+}
+
+// MoveToFront moves element e to the front of list l.
+// If e is not an element of l, the list is not modified.
+// The element must not be nil.
+func (l *List[K, V]) MoveToFront(e *Entry[K, V]) {
+ l.move(e, &l.root)
+}
+
+// MoveToBack moves element e to the back of list l.
+// If e is not an element of l, the list is not modified.
+// The element must not be nil.
+func (l *List[K, V]) MoveToBack(e *Entry[K, V]) {
+ l.move(e, l.root.prev())
+}
+
+// MoveBefore moves element e to its new position before mark.
+// If e or mark is not an element of l, or e == mark, the list is not modified.
+// The element and mark must not be nil.
+func (l *List[K, V]) MoveBefore(e, mark *Entry[K, V]) {
+ l.move(e, mark.prev())
+}
+
+// MoveAfter moves element e to its new position after mark.
+// If e or mark is not an element of l, or e == mark, the list is not modified.
+// The element and mark must not be nil.
+func (l *List[K, V]) MoveAfter(e, mark *Entry[K, V]) {
+ l.move(e, mark)
+}
+
+func (l *List[K, V]) PopTail() *Entry[K, V] {
+ entry := l.root.prev()
+ if entry != nil && entry != &l.root {
+ l.remove(entry)
+ return entry
+ }
+ return nil
+}
+
+func (l *List[K, V]) Contains(entry *Entry[K, V]) bool {
+ for e := l.Front(); e != nil; e = e.Next() {
+ if e == entry {
+ return true
+ }
+ }
+ return false
+}
diff --git a/go/cache/theine/list_test.go b/go/cache/theine/list_test.go
new file mode 100644
index 00000000000..aad68f5c142
--- /dev/null
+++ b/go/cache/theine/list_test.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2023 The Vitess Authors.
+Copyright 2023 Yiling-J
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package theine
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestList(t *testing.T) {
+ l := NewList[StringKey, string](5, LIST_PROBATION)
+ require.Equal(t, uint(5), l.capacity)
+ require.Equal(t, LIST_PROBATION, l.listType)
+ for i := 0; i < 5; i++ {
+ evicted := l.PushFront(NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 1))
+ require.Nil(t, evicted)
+ }
+ require.Equal(t, 5, l.len)
+ require.Equal(t, "4/3/2/1/0", l.display())
+ require.Equal(t, "0/1/2/3/4", l.displayReverse())
+
+ evicted := l.PushFront(NewEntry(StringKey("5"), "", 1))
+ require.Equal(t, StringKey("0"), evicted.key)
+ require.Equal(t, 5, l.len)
+ require.Equal(t, "5/4/3/2/1", l.display())
+ require.Equal(t, "1/2/3/4/5", l.displayReverse())
+
+ for i := 0; i < 5; i++ {
+ entry := l.PopTail()
+ require.Equal(t, StringKey(fmt.Sprintf("%d", i+1)), entry.key)
+ }
+ entry := l.PopTail()
+ require.Nil(t, entry)
+
+ var entries []*Entry[StringKey, string]
+ for i := 0; i < 5; i++ {
+ new := NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 1)
+ evicted := l.PushFront(new)
+ entries = append(entries, new)
+ require.Nil(t, evicted)
+ }
+ require.Equal(t, "4/3/2/1/0", l.display())
+ l.MoveToBack(entries[2])
+ require.Equal(t, "4/3/1/0/2", l.display())
+ require.Equal(t, "2/0/1/3/4", l.displayReverse())
+ l.MoveBefore(entries[1], entries[3])
+ require.Equal(t, "4/1/3/0/2", l.display())
+ require.Equal(t, "2/0/3/1/4", l.displayReverse())
+ l.MoveAfter(entries[2], entries[4])
+ require.Equal(t, "4/2/1/3/0", l.display())
+ require.Equal(t, "0/3/1/2/4", l.displayReverse())
+ l.Remove(entries[1])
+ require.Equal(t, "4/2/3/0", l.display())
+ require.Equal(t, "0/3/2/4", l.displayReverse())
+
+}
+
+func TestListCountCost(t *testing.T) {
+ l := NewList[StringKey, string](100, LIST_PROBATION)
+ require.Equal(t, uint(100), l.capacity)
+ require.Equal(t, LIST_PROBATION, l.listType)
+ for i := 0; i < 5; i++ {
+ evicted := l.PushFront(NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 20))
+ require.Nil(t, evicted)
+ }
+ require.Equal(t, 100, l.len)
+ require.Equal(t, 5, l.count)
+ for i := 0; i < 3; i++ {
+ entry := l.PopTail()
+ require.NotNil(t, entry)
+ }
+ require.Equal(t, 40, l.len)
+ require.Equal(t, 2, l.count)
+}
diff --git a/go/cache/theine/mpsc.go b/go/cache/theine/mpsc.go
new file mode 100644
index 00000000000..c00e2ce5a26
--- /dev/null
+++ b/go/cache/theine/mpsc.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2023 The Vitess Authors.
+Copyright 2023 Yiling-J
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package theine
+
+// This implementation is based on http://www.1024cores.net/home/lock-free-algorithms/queues/non-intrusive-mpsc-node-based-queue
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+type node[V any] struct {
+ next atomic.Pointer[node[V]]
+ val V
+}
+
+type Queue[V any] struct {
+ head, tail atomic.Pointer[node[V]]
+ nodePool sync.Pool
+}
+
+func NewQueue[V any]() *Queue[V] {
+ q := &Queue[V]{nodePool: sync.Pool{New: func() any {
+ return new(node[V])
+ }}}
+ stub := &node[V]{}
+ q.head.Store(stub)
+ q.tail.Store(stub)
+ return q
+}
+
+// Push adds x to the back of the queue.
+//
+// Push can be safely called from multiple goroutines
+func (q *Queue[V]) Push(x V) {
+ n := q.nodePool.Get().(*node[V])
+ n.val = x
+
+ // current producer acquires head node
+ prev := q.head.Swap(n)
+
+ // release node to consumer
+ prev.next.Store(n)
+}
+
+// Pop removes and returns the item at the front of the queue; the boolean result is false if the queue is empty
+//
+// Pop must be called from a single, consumer goroutine
+func (q *Queue[V]) Pop() (V, bool) {
+ tail := q.tail.Load()
+ next := tail.next.Load()
+ if next != nil {
+ var null V
+ q.tail.Store(next)
+ v := next.val
+ next.val = null
+ tail.next.Store(nil)
+ q.nodePool.Put(tail)
+ return v, true
+ }
+ var null V
+ return null, false
+}
+
+// Empty returns true if the queue is empty
+//
+// Empty must be called from a single, consumer goroutine
+func (q *Queue[V]) Empty() bool {
+ tail := q.tail.Load()
+ return tail.next.Load() == nil
+}
diff --git a/go/cache/perf_test.go b/go/cache/theine/mpsc_test.go
similarity index 53%
rename from go/cache/perf_test.go
rename to go/cache/theine/mpsc_test.go
index 693e55238a0..eca50efed3e 100644
--- a/go/cache/perf_test.go
+++ b/go/cache/theine/mpsc_test.go
@@ -1,5 +1,6 @@
/*
-Copyright 2019 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
+Copyright 2023 Yiling-J
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,23 +15,32 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package cache
+package theine
import (
"testing"
+
+ "github.com/stretchr/testify/assert"
)
-func BenchmarkGet(b *testing.B) {
- cache := NewLRUCache(64*1024*1024, func(val any) int64 {
- return int64(cap(val.([]byte)))
- })
- value := make([]byte, 1000)
- cache.Set("stuff", value)
- for i := 0; i < b.N; i++ {
- val, ok := cache.Get("stuff")
- if !ok {
- panic("error")
- }
- _ = val
- }
+func TestQueue_PushPop(t *testing.T) {
+ q := NewQueue[int]()
+
+ q.Push(1)
+ q.Push(2)
+ v, ok := q.Pop()
+ assert.True(t, ok)
+ assert.Equal(t, 1, v)
+ v, ok = q.Pop()
+ assert.True(t, ok)
+ assert.Equal(t, 2, v)
+ _, ok = q.Pop()
+ assert.False(t, ok)
+}
+
+func TestQueue_Empty(t *testing.T) {
+ q := NewQueue[int]()
+ assert.True(t, q.Empty())
+ q.Push(1)
+ assert.False(t, q.Empty())
}
diff --git a/go/cache/theine/singleflight.go b/go/cache/theine/singleflight.go
new file mode 100644
index 00000000000..fde56670514
--- /dev/null
+++ b/go/cache/theine/singleflight.go
@@ -0,0 +1,196 @@
+/*
+Copyright 2023 The Vitess Authors.
+Copyright 2023 Yiling-J
+Copyright 2013 The Go Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package theine
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "runtime"
+ "runtime/debug"
+ "sync"
+ "sync/atomic"
+)
+
+// errGoexit indicates the runtime.Goexit was called in
+// the user given function.
+var errGoexit = errors.New("runtime.Goexit was called")
+
+// A panicError is an arbitrary value recovered from a panic
+// with the stack trace during the execution of given function.
+type panicError struct {
+ value interface{}
+ stack []byte
+}
+
+// Error implements error interface.
+func (p *panicError) Error() string {
+ return fmt.Sprintf("%v\n\n%s", p.value, p.stack)
+}
+
+func newPanicError(v interface{}) error {
+ stack := debug.Stack()
+
+ // The first line of the stack trace is of the form "goroutine N [status]:"
+ // but by the time the panic reaches Do the goroutine may no longer exist
+ // and its status will have changed. Trim out the misleading line.
+ if line := bytes.IndexByte(stack[:], '\n'); line >= 0 {
+ stack = stack[line+1:]
+ }
+ return &panicError{value: v, stack: stack}
+}
+
+// call is an in-flight or completed singleflight.Do call
+type call[V any] struct {
+
+ // These fields are written once before the WaitGroup is done
+ // and are only read after the WaitGroup is done.
+ val V
+ err error
+
+ wg sync.WaitGroup
+
+ // dups counts the callers sharing this in-flight call. Unlike the
+ // original x/sync implementation it is maintained atomically (it is
+ // decremented after the WaitGroup is done) so the call can be
+ dups atomic.Int32
+}
+
+// Group represents a class of work and forms a namespace in
+// which units of work can be executed with duplicate suppression.
+type Group[K comparable, V any] struct {
+ m map[K]*call[V] // lazily initialized
+ mu sync.Mutex // protects m
+ callPool sync.Pool
+}
+
+func NewGroup[K comparable, V any]() *Group[K, V] {
+ return &Group[K, V]{
+ callPool: sync.Pool{New: func() any {
+ return new(call[V])
+ }},
+ }
+}
+
+// Result holds the results of Do, so they can be passed
+// on a channel.
+type Result struct {
+ Val interface{}
+ Err error
+ Shared bool
+}
+
+// Do executes and returns the results of the given function, making
+// sure that only one execution is in-flight for a given key at a
+// time. If a duplicate comes in, the duplicate caller waits for the
+// original to complete and receives the same results.
+// The return value shared indicates whether v was given to multiple callers.
+func (g *Group[K, V]) Do(key K, fn func() (V, error)) (v V, err error, shared bool) {
+ g.mu.Lock()
+ if g.m == nil {
+ g.m = make(map[K]*call[V])
+ }
+ if c, ok := g.m[key]; ok {
+ _ = c.dups.Add(1)
+ g.mu.Unlock()
+ c.wg.Wait()
+
+ if e, ok := c.err.(*panicError); ok {
+ panic(e)
+ } else if c.err == errGoexit {
+ runtime.Goexit()
+ }
+ // assign value/err before put back to pool to avoid race
+ v = c.val
+ err = c.err
+ n := c.dups.Add(-1)
+ if n == 0 {
+ g.callPool.Put(c)
+ }
+ return v, err, true
+ }
+ c := g.callPool.Get().(*call[V])
+ defer func() {
+ n := c.dups.Add(-1)
+ if n == 0 {
+ g.callPool.Put(c)
+ }
+ }()
+ _ = c.dups.Add(1)
+ c.wg.Add(1)
+ g.m[key] = c
+ g.mu.Unlock()
+
+ g.doCall(c, key, fn)
+ return c.val, c.err, true
+}
+
+// doCall handles the single call for a key.
+func (g *Group[K, V]) doCall(c *call[V], key K, fn func() (V, error)) {
+ normalReturn := false
+ recovered := false
+
+ // use double-defer to distinguish panic from runtime.Goexit,
+ // more details see https://golang.org/cl/134395
+ defer func() {
+ // the given function invoked runtime.Goexit
+ if !normalReturn && !recovered {
+ c.err = errGoexit
+ }
+
+ g.mu.Lock()
+ defer g.mu.Unlock()
+ c.wg.Done()
+ if g.m[key] == c {
+ delete(g.m, key)
+ }
+
+ if e, ok := c.err.(*panicError); ok {
+ panic(e)
+ }
+ }()
+
+ func() {
+ defer func() {
+ if !normalReturn {
+ // Ideally, we would wait to take a stack trace until we've determined
+ // whether this is a panic or a runtime.Goexit.
+ //
+ // Unfortunately, the only way we can distinguish the two is to see
+ // whether the recover stopped the goroutine from terminating, and by
+ // the time we know that, the part of the stack trace relevant to the
+ // panic has been discarded.
+ if r := recover(); r != nil {
+ c.err = newPanicError(r)
+ }
+ }
+ }()
+
+ c.val, c.err = fn()
+ normalReturn = true
+ }()
+
+ if !normalReturn {
+ recovered = true
+ }
+}
diff --git a/go/cache/theine/singleflight_test.go b/go/cache/theine/singleflight_test.go
new file mode 100644
index 00000000000..60b28e69b4e
--- /dev/null
+++ b/go/cache/theine/singleflight_test.go
@@ -0,0 +1,211 @@
+/*
+Copyright 2023 The Vitess Authors.
+Copyright 2023 Yiling-J
+Copyright 2013 The Go Authors. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package theine
+
+import (
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+)
+
+func TestDo(t *testing.T) {
+ g := NewGroup[string, string]()
+ v, err, _ := g.Do("key", func() (string, error) {
+ return "bar", nil
+ })
+ if got, want := fmt.Sprintf("%v (%T)", v, v), "bar (string)"; got != want {
+ t.Errorf("Do = %v; want %v", got, want)
+ }
+ if err != nil {
+ t.Errorf("Do error = %v", err)
+ }
+}
+
+func TestDoErr(t *testing.T) {
+ g := NewGroup[string, string]()
+ someErr := errors.New("Some error")
+ v, err, _ := g.Do("key", func() (string, error) {
+ return "", someErr
+ })
+ if err != someErr {
+ t.Errorf("Do error = %v; want someErr %v", err, someErr)
+ }
+ if v != "" {
+ t.Errorf("unexpected non-nil value %#v", v)
+ }
+}
+
+func TestDoDupSuppress(t *testing.T) {
+ g := NewGroup[string, string]()
+ var wg1, wg2 sync.WaitGroup
+ c := make(chan string, 1)
+ var calls int32
+ fn := func() (string, error) {
+ if atomic.AddInt32(&calls, 1) == 1 {
+ // First invocation.
+ wg1.Done()
+ }
+ v := <-c
+ c <- v // pump; make available for any future calls
+
+ time.Sleep(10 * time.Millisecond) // let more goroutines enter Do
+
+ return v, nil
+ }
+
+ const n = 10
+ wg1.Add(1)
+ for i := 0; i < n; i++ {
+ wg1.Add(1)
+ wg2.Add(1)
+ go func() {
+ defer wg2.Done()
+ wg1.Done()
+ v, err, _ := g.Do("key", fn)
+ if err != nil {
+ t.Errorf("Do error: %v", err)
+ return
+ }
+ if s := v; s != "bar" {
+ t.Errorf("Do = %T %v; want %q", v, v, "bar")
+ }
+ }()
+ }
+ wg1.Wait()
+ // At least one goroutine is in fn now and all of them have at
+ // least reached the line before the Do.
+ c <- "bar"
+ wg2.Wait()
+ if got := atomic.LoadInt32(&calls); got <= 0 || got >= n {
+ t.Errorf("number of calls = %d; want over 0 and less than %d", got, n)
+ }
+}
+
+// Test singleflight behaves correctly after Do panic.
+// See https://github.com/golang/go/issues/41133
+func TestPanicDo(t *testing.T) {
+ g := NewGroup[string, string]()
+ fn := func() (string, error) {
+ panic("invalid memory address or nil pointer dereference")
+ }
+
+ const n = 5
+ waited := int32(n)
+ panicCount := int32(0)
+ done := make(chan struct{})
+ for i := 0; i < n; i++ {
+ go func() {
+ defer func() {
+ if err := recover(); err != nil {
+ atomic.AddInt32(&panicCount, 1)
+ }
+
+ if atomic.AddInt32(&waited, -1) == 0 {
+ close(done)
+ }
+ }()
+
+ _, _, _ = g.Do("key", fn)
+ }()
+ }
+
+ select {
+ case <-done:
+ if panicCount != n {
+ t.Errorf("Expect %d panic, but got %d", n, panicCount)
+ }
+ case <-time.After(time.Second):
+ t.Fatalf("Do hangs")
+ }
+}
+
+func TestGoexitDo(t *testing.T) {
+ g := NewGroup[string, int]()
+ fn := func() (int, error) {
+ runtime.Goexit()
+ return 0, nil
+ }
+
+ const n = 5
+ waited := int32(n)
+ done := make(chan struct{})
+ for i := 0; i < n; i++ {
+ go func() {
+ var err error
+ defer func() {
+ if err != nil {
+ t.Errorf("Error should be nil, but got: %v", err)
+ }
+ if atomic.AddInt32(&waited, -1) == 0 {
+ close(done)
+ }
+ }()
+ _, err, _ = g.Do("key", fn)
+ }()
+ }
+
+ select {
+ case <-done:
+ case <-time.After(time.Second):
+ t.Fatalf("Do hangs")
+ }
+}
+
+func BenchmarkDo(b *testing.B) {
+ keys := randKeys(b, 10240, 10)
+ benchDo(b, NewGroup[string, int](), keys)
+
+}
+
+func benchDo(b *testing.B, g *Group[string, int], keys []string) {
+ keyc := len(keys)
+ b.ReportAllocs()
+ b.ResetTimer()
+
+ b.RunParallel(func(pb *testing.PB) {
+ for i := 0; pb.Next(); i++ {
+ _, _, _ = g.Do(keys[i%keyc], func() (int, error) {
+ return 0, nil
+ })
+ }
+ })
+}
+
+func randKeys(b *testing.B, count, length uint) []string {
+ keys := make([]string, 0, count)
+ key := make([]byte, length)
+
+ for i := uint(0); i < count; i++ {
+ if _, err := io.ReadFull(rand.Reader, key); err != nil {
+ b.Fatalf("Failed to generate random key %d of %d of length %d: %s", i+1, count, length, err)
+ }
+ keys = append(keys, string(key))
+ }
+ return keys
+}
diff --git a/go/cache/theine/sketch.go b/go/cache/theine/sketch.go
new file mode 100644
index 00000000000..7d241d94fc8
--- /dev/null
+++ b/go/cache/theine/sketch.go
@@ -0,0 +1,137 @@
+/*
+Copyright 2023 The Vitess Authors.
+Copyright 2023 Yiling-J
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package theine
+
+type CountMinSketch struct {
+ Table []uint64
+ Additions uint
+ SampleSize uint
+ BlockMask uint
+}
+
+func NewCountMinSketch() *CountMinSketch {
+ new := &CountMinSketch{}
+ new.EnsureCapacity(16)
+ return new
+}
+
+// indexOf return table index and counter index together
+func (s *CountMinSketch) indexOf(h uint64, block uint64, offset uint8) (uint, uint) {
+ counterHash := h + uint64(1+offset)*(h>>32)
+ // max index is block+7 (8 counters * 8 bytes), so one block fits a 64-byte cache line
+ index := block + counterHash&1 + uint64(offset<<1)
+ return uint(index), uint((counterHash & 0xF) << 2)
+}
+
+func (s *CountMinSketch) inc(index uint, offset uint) bool {
+ mask := uint64(0xF << offset)
+ if s.Table[index]&mask != mask {
+ s.Table[index] += 1 << offset
+ return true
+ }
+ return false
+}
+
+func (s *CountMinSketch) Add(h uint64) bool {
+ hn := spread(h)
+ block := (hn & uint64(s.BlockMask)) << 3
+ hc := rehash(h)
+ index0, offset0 := s.indexOf(hc, block, 0)
+ index1, offset1 := s.indexOf(hc, block, 1)
+ index2, offset2 := s.indexOf(hc, block, 2)
+ index3, offset3 := s.indexOf(hc, block, 3)
+
+ added := s.inc(index0, offset0)
+ added = s.inc(index1, offset1) || added
+ added = s.inc(index2, offset2) || added
+ added = s.inc(index3, offset3) || added
+
+ if added {
+ s.Additions += 1
+ if s.Additions == s.SampleSize {
+ s.reset()
+ return true
+ }
+ }
+ return false
+}
+
+func (s *CountMinSketch) reset() {
+ for i := range s.Table {
+ s.Table[i] = s.Table[i] >> 1
+ }
+ s.Additions = s.Additions >> 1
+}
+
+func (s *CountMinSketch) count(h uint64, block uint64, offset uint8) uint {
+ index, off := s.indexOf(h, block, offset)
+ count := (s.Table[index] >> off) & 0xF
+ return uint(count)
+}
+
+func (s *CountMinSketch) Estimate(h uint64) uint {
+ hn := spread(h)
+ block := (hn & uint64(s.BlockMask)) << 3
+ hc := rehash(h)
+ m := min(s.count(hc, block, 0), 100)
+ m = min(s.count(hc, block, 1), m)
+ m = min(s.count(hc, block, 2), m)
+ m = min(s.count(hc, block, 3), m)
+ return m
+}
+
+func next2Power(x uint) uint {
+ x--
+ x |= x >> 1
+ x |= x >> 2
+ x |= x >> 4
+ x |= x >> 8
+ x |= x >> 16
+ x |= x >> 32
+ x++
+ return x
+}
+
+func (s *CountMinSketch) EnsureCapacity(size uint) {
+ if len(s.Table) >= int(size) {
+ return
+ }
+ if size < 16 {
+ size = 16
+ }
+ newSize := next2Power(size)
+ s.Table = make([]uint64, newSize)
+ s.SampleSize = 10 * size
+ s.BlockMask = uint((len(s.Table) >> 3) - 1)
+ s.Additions = 0
+}
+
+func spread(h uint64) uint64 {
+ h ^= h >> 17
+ h *= 0xed5ad4bb
+ h ^= h >> 11
+ h *= 0xac4c1b51
+ h ^= h >> 15
+ return h
+}
+
+func rehash(h uint64) uint64 {
+ h *= 0x31848bab
+ h ^= h >> 14
+ return h
+}
diff --git a/go/cache/theine/sketch_test.go b/go/cache/theine/sketch_test.go
new file mode 100644
index 00000000000..3437f0cac3c
--- /dev/null
+++ b/go/cache/theine/sketch_test.go
@@ -0,0 +1,54 @@
+package theine
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/cespare/xxhash/v2"
+ "github.com/stretchr/testify/require"
+)
+
+func TestEnsureCapacity(t *testing.T) {
+ sketch := NewCountMinSketch()
+ sketch.EnsureCapacity(1)
+ require.Equal(t, 16, len(sketch.Table))
+}
+
+func TestSketch(t *testing.T) {
+ sketch := NewCountMinSketch()
+ sketch.EnsureCapacity(100)
+ require.Equal(t, 128, len(sketch.Table))
+ require.Equal(t, uint(1000), sketch.SampleSize)
+ // override sampleSize so test won't reset
+ sketch.SampleSize = 5120
+
+ failed := 0
+ for i := 0; i < 500; i++ {
+ key := fmt.Sprintf("key:%d", i)
+ keyh := xxhash.Sum64String(key)
+ sketch.Add(keyh)
+ sketch.Add(keyh)
+ sketch.Add(keyh)
+ sketch.Add(keyh)
+ sketch.Add(keyh)
+ key = fmt.Sprintf("key:%d:b", i)
+ keyh2 := xxhash.Sum64String(key)
+ sketch.Add(keyh2)
+ sketch.Add(keyh2)
+ sketch.Add(keyh2)
+
+ es1 := sketch.Estimate(keyh)
+ es2 := sketch.Estimate(keyh2)
+ if es2 > es1 {
+ failed++
+ }
+ require.True(t, es1 >= 5)
+ require.True(t, es2 >= 3)
+
+ }
+ require.True(t, float32(failed)/4000 < 0.1)
+ require.True(t, sketch.Additions > 3500)
+ a := sketch.Additions
+ sketch.reset()
+ require.Equal(t, a>>1, sketch.Additions)
+}
diff --git a/go/cache/theine/slru.go b/go/cache/theine/slru.go
new file mode 100644
index 00000000000..e3bcb2532b1
--- /dev/null
+++ b/go/cache/theine/slru.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2023 The Vitess Authors.
+Copyright 2023 Yiling-J
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package theine
+
+type Slru[K cachekey, V any] struct {
+ probation *List[K, V]
+ protected *List[K, V]
+ maxsize uint
+}
+
+func NewSlru[K cachekey, V any](size uint) *Slru[K, V] {
+ return &Slru[K, V]{
+ maxsize: size,
+ probation: NewList[K, V](size, LIST_PROBATION),
+ protected: NewList[K, V](uint(float32(size)*0.8), LIST_PROTECTED),
+ }
+}
+
+func (s *Slru[K, V]) insert(entry *Entry[K, V]) *Entry[K, V] {
+ var evicted *Entry[K, V]
+ if s.probation.Len()+s.protected.Len() >= int(s.maxsize) {
+ evicted = s.probation.PopTail()
+ }
+ s.probation.PushFront(entry)
+ return evicted
+}
+
+func (s *Slru[K, V]) victim() *Entry[K, V] {
+ if s.probation.Len()+s.protected.Len() < int(s.maxsize) {
+ return nil
+ }
+ return s.probation.Back()
+}
+
+func (s *Slru[K, V]) access(entry *Entry[K, V]) {
+ switch entry.list {
+ case LIST_PROBATION:
+ s.probation.remove(entry)
+ evicted := s.protected.PushFront(entry)
+ if evicted != nil {
+ s.probation.PushFront(evicted)
+ }
+ case LIST_PROTECTED:
+ s.protected.MoveToFront(entry)
+ }
+}
+
+func (s *Slru[K, V]) remove(entry *Entry[K, V]) {
+ switch entry.list {
+ case LIST_PROBATION:
+ s.probation.remove(entry)
+ case LIST_PROTECTED:
+ s.protected.remove(entry)
+ }
+}
+
+func (s *Slru[K, V]) updateCost(entry *Entry[K, V], delta int64) {
+ switch entry.list {
+ case LIST_PROBATION:
+ s.probation.len += int(delta)
+ case LIST_PROTECTED:
+ s.protected.len += int(delta)
+ }
+}
diff --git a/go/cache/theine/store.go b/go/cache/theine/store.go
new file mode 100644
index 00000000000..3d86e549867
--- /dev/null
+++ b/go/cache/theine/store.go
@@ -0,0 +1,615 @@
+/*
+Copyright 2023 The Vitess Authors.
+Copyright 2023 Yiling-J
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package theine
+
+import (
+ "runtime"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/gammazero/deque"
+
+ "vitess.io/vitess/go/cache/theine/bf"
+ "vitess.io/vitess/go/hack"
+)
+
+const (
+ MaxReadBuffSize = 64
+ MinWriteBuffSize = 4
+ MaxWriteBuffSize = 1024
+)
+
+type RemoveReason uint8
+
+const (
+ REMOVED RemoveReason = iota
+ EVICTED
+ EXPIRED
+)
+
+type Shard[K cachekey, V any] struct {
+ hashmap map[K]*Entry[K, V]
+ dookeeper *bf.Bloomfilter
+ deque *deque.Deque[*Entry[K, V]]
+ group *Group[K, V]
+ qsize uint
+ qlen int
+ counter uint
+ mu sync.RWMutex
+}
+
+func NewShard[K cachekey, V any](size uint, qsize uint, doorkeeper bool) *Shard[K, V] {
+ s := &Shard[K, V]{
+ hashmap: make(map[K]*Entry[K, V]),
+ qsize: qsize,
+ deque: deque.New[*Entry[K, V]](),
+ group: NewGroup[K, V](),
+ }
+ if doorkeeper {
+ s.dookeeper = bf.New(0.01)
+ }
+ return s
+}
+
+func (s *Shard[K, V]) set(key K, entry *Entry[K, V]) {
+ s.hashmap[key] = entry
+ if s.dookeeper != nil {
+ ds := 20 * len(s.hashmap)
+ if ds > s.dookeeper.Capacity {
+ s.dookeeper.EnsureCapacity(ds)
+ }
+ }
+}
+
+func (s *Shard[K, V]) get(key K) (entry *Entry[K, V], ok bool) {
+ entry, ok = s.hashmap[key]
+ return
+}
+
+func (s *Shard[K, V]) delete(entry *Entry[K, V]) bool {
+ var deleted bool
+ exist, ok := s.hashmap[entry.key]
+ if ok && exist == entry {
+ delete(s.hashmap, exist.key)
+ deleted = true
+ }
+ return deleted
+}
+
+func (s *Shard[K, V]) len() int {
+ return len(s.hashmap)
+}
+
+type Metrics struct {
+ evicted atomic.Int64
+ hits atomic.Int64
+ misses atomic.Int64
+}
+
+func (m *Metrics) Evicted() int64 {
+ return m.evicted.Load()
+}
+
+func (m *Metrics) Hits() int64 {
+ return m.hits.Load()
+}
+
+func (m *Metrics) Misses() int64 {
+ return m.misses.Load()
+}
+
+func (m *Metrics) Accesses() int64 {
+ return m.Hits() + m.Misses()
+}
+
+type cachekey interface {
+ comparable
+ Hash() uint64
+ Hash2() (uint64, uint64)
+}
+
+type HashKey256 [32]byte
+
+func (h HashKey256) Hash() uint64 {
+ return uint64(h[0]) | uint64(h[1])<<8 | uint64(h[2])<<16 | uint64(h[3])<<24 |
+ uint64(h[4])<<32 | uint64(h[5])<<40 | uint64(h[6])<<48 | uint64(h[7])<<56
+}
+
+func (h HashKey256) Hash2() (uint64, uint64) {
+ h0 := h.Hash()
+ h1 := uint64(h[8]) | uint64(h[9])<<8 | uint64(h[10])<<16 | uint64(h[11])<<24 |
+ uint64(h[12])<<32 | uint64(h[13])<<40 | uint64(h[14])<<48 | uint64(h[15])<<56
+ return h0, h1
+}
+
+type StringKey string
+
+func (h StringKey) Hash() uint64 {
+ return hack.RuntimeStrhash(string(h), 13850135847636357301)
+}
+
+func (h StringKey) Hash2() (uint64, uint64) {
+ h0 := h.Hash()
+ h1 := ((h0 >> 16) ^ h0) * 0x45d9f3b
+ h1 = ((h1 >> 16) ^ h1) * 0x45d9f3b
+ h1 = (h1 >> 16) ^ h1
+ return h0, h1
+}
+
+type cacheval interface {
+ CachedSize(alloc bool) int64
+}
+
+type Store[K cachekey, V cacheval] struct {
+ Metrics Metrics
+ OnRemoval func(K, V, RemoveReason)
+
+ entryPool sync.Pool
+ writebuf chan WriteBufItem[K, V]
+ policy *TinyLfu[K, V]
+ readbuf *Queue[ReadBufItem[K, V]]
+ shards []*Shard[K, V]
+ cap uint
+ shardCount uint
+ writebufsize int64
+ tailUpdate bool
+ doorkeeper bool
+
+ mlock sync.Mutex
+ readCounter atomic.Uint32
+ open atomic.Bool
+}
+
+func NewStore[K cachekey, V cacheval](maxsize int64, doorkeeper bool) *Store[K, V] {
+ writeBufSize := maxsize / 100
+ if writeBufSize < MinWriteBuffSize {
+ writeBufSize = MinWriteBuffSize
+ }
+ if writeBufSize > MaxWriteBuffSize {
+ writeBufSize = MaxWriteBuffSize
+ }
+ shardCount := 1
+ for shardCount < runtime.GOMAXPROCS(0)*2 {
+ shardCount *= 2
+ }
+ if shardCount < 16 {
+ shardCount = 16
+ }
+ if shardCount > 128 {
+ shardCount = 128
+ }
+ dequeSize := int(maxsize) / 100 / shardCount
+ shardSize := int(maxsize) / shardCount
+ if shardSize < 50 {
+ shardSize = 50
+ }
+ policySize := int(maxsize) - (dequeSize * shardCount)
+
+ s := &Store[K, V]{
+ cap: uint(maxsize),
+ policy: NewTinyLfu[K, V](uint(policySize)),
+ readbuf: NewQueue[ReadBufItem[K, V]](),
+ writebuf: make(chan WriteBufItem[K, V], writeBufSize),
+ entryPool: sync.Pool{New: func() any { return &Entry[K, V]{} }},
+ shardCount: uint(shardCount),
+ doorkeeper: doorkeeper,
+ writebufsize: writeBufSize,
+ }
+ s.shards = make([]*Shard[K, V], 0, s.shardCount)
+ for i := 0; i < int(s.shardCount); i++ {
+ s.shards = append(s.shards, NewShard[K, V](uint(shardSize), uint(dequeSize), doorkeeper))
+ }
+
+ go s.maintenance()
+ s.open.Store(true)
+ return s
+}
+
+func (s *Store[K, V]) EnsureOpen() {
+ if s.open.Swap(true) {
+ return
+ }
+ s.writebuf = make(chan WriteBufItem[K, V], s.writebufsize)
+ go s.maintenance()
+}
+
+func (s *Store[K, V]) getFromShard(key K, hash uint64, shard *Shard[K, V], epoch uint32) (V, bool) {
+ new := s.readCounter.Add(1)
+ shard.mu.RLock()
+ entry, ok := shard.get(key)
+ var value V
+ if ok {
+ if entry.epoch.Load() < epoch {
+ s.Metrics.misses.Add(1)
+ ok = false
+ } else {
+ s.Metrics.hits.Add(1)
+ s.policy.hit.Add(1)
+ value = entry.value
+ }
+ } else {
+ s.Metrics.misses.Add(1)
+ }
+ shard.mu.RUnlock()
+ switch {
+ case new < MaxReadBuffSize:
+ var send ReadBufItem[K, V]
+ send.hash = hash
+ if ok {
+ send.entry = entry
+ }
+ s.readbuf.Push(send)
+ case new == MaxReadBuffSize:
+ var send ReadBufItem[K, V]
+ send.hash = hash
+ if ok {
+ send.entry = entry
+ }
+ s.readbuf.Push(send)
+ s.drainRead()
+ }
+ return value, ok
+}
+
+func (s *Store[K, V]) Get(key K, epoch uint32) (V, bool) {
+ h, index := s.index(key)
+ shard := s.shards[index]
+ return s.getFromShard(key, h, shard, epoch)
+}
+
+func (s *Store[K, V]) GetOrLoad(key K, epoch uint32, load func() (V, error)) (V, bool, error) {
+ h, index := s.index(key)
+ shard := s.shards[index]
+ v, ok := s.getFromShard(key, h, shard, epoch)
+ if !ok {
+ loaded, err, _ := shard.group.Do(key, func() (V, error) {
+ loaded, err := load()
+ if err == nil {
+ s.Set(key, loaded, 0, epoch)
+ }
+ return loaded, err
+ })
+ return loaded, false, err
+ }
+ return v, true, nil
+}
+
+func (s *Store[K, V]) setEntry(shard *Shard[K, V], cost int64, epoch uint32, entry *Entry[K, V]) {
+ shard.set(entry.key, entry)
+ // cost larger than deque size, send to policy directly
+ if cost > int64(shard.qsize) {
+ shard.mu.Unlock()
+ s.writebuf <- WriteBufItem[K, V]{entry: entry, code: NEW}
+ return
+ }
+ entry.deque = true
+ shard.deque.PushFront(entry)
+ shard.qlen += int(cost)
+ s.processDeque(shard, epoch)
+}
+
+func (s *Store[K, V]) setInternal(key K, value V, cost int64, epoch uint32) (*Shard[K, V], *Entry[K, V], bool) {
+ h, index := s.index(key)
+ shard := s.shards[index]
+ shard.mu.Lock()
+ exist, ok := shard.get(key)
+ if ok {
+ var costChange int64
+ exist.value = value
+ oldCost := exist.cost.Swap(cost)
+ if oldCost != cost {
+ costChange = cost - oldCost
+ if exist.deque {
+ shard.qlen += int(costChange)
+ }
+ }
+ shard.mu.Unlock()
+ exist.epoch.Store(epoch)
+ if costChange != 0 {
+ s.writebuf <- WriteBufItem[K, V]{
+ entry: exist, code: UPDATE, costChange: costChange,
+ }
+ }
+ return shard, exist, true
+ }
+ if s.doorkeeper {
+ if shard.counter > uint(shard.dookeeper.Capacity) {
+ shard.dookeeper.Reset()
+ shard.counter = 0
+ }
+ hit := shard.dookeeper.Insert(h)
+ if !hit {
+ shard.counter += 1
+ shard.mu.Unlock()
+ return shard, nil, false
+ }
+ }
+ entry := s.entryPool.Get().(*Entry[K, V])
+ entry.frequency.Store(-1)
+ entry.key = key
+ entry.value = value
+ entry.cost.Store(cost)
+ entry.epoch.Store(epoch)
+ s.setEntry(shard, cost, epoch, entry)
+ return shard, entry, true
+
+}
+
+func (s *Store[K, V]) Set(key K, value V, cost int64, epoch uint32) bool {
+ if cost == 0 {
+ cost = value.CachedSize(true)
+ }
+ if cost > int64(s.cap) {
+ return false
+ }
+ _, _, ok := s.setInternal(key, value, cost, epoch)
+ return ok
+}
+
+type dequeKV[K cachekey, V cacheval] struct {
+ k K
+ v V
+}
+
+func (s *Store[K, V]) processDeque(shard *Shard[K, V], epoch uint32) {
+ if shard.qlen <= int(shard.qsize) {
+ shard.mu.Unlock()
+ return
+ }
+ var evictedkv []dequeKV[K, V]
+ var expiredkv []dequeKV[K, V]
+
+ // send to slru
+ send := make([]*Entry[K, V], 0, 2)
+ for shard.qlen > int(shard.qsize) {
+ evicted := shard.deque.PopBack()
+ evicted.deque = false
+ shard.qlen -= int(evicted.cost.Load())
+
+ if evicted.epoch.Load() < epoch {
+ deleted := shard.delete(evicted)
+ if deleted {
+ if s.OnRemoval != nil {
+ evictedkv = append(evictedkv, dequeKV[K, V]{evicted.key, evicted.value})
+ }
+ s.postDelete(evicted)
+ s.Metrics.evicted.Add(1)
+ }
+ } else {
+ count := evicted.frequency.Load()
+ threshold := s.policy.threshold.Load()
+ if count == -1 {
+ send = append(send, evicted)
+ } else {
+ if int32(count) >= threshold {
+ send = append(send, evicted)
+ } else {
+ deleted := shard.delete(evicted)
+ // double check because the entry may have been removed already via the Delete API
+ if deleted {
+ if s.OnRemoval != nil {
+ evictedkv = append(evictedkv, dequeKV[K, V]{evicted.key, evicted.value})
+ }
+ s.postDelete(evicted)
+ s.Metrics.evicted.Add(1)
+ }
+ }
+ }
+ }
+ }
+
+ shard.mu.Unlock()
+ for _, entry := range send {
+ s.writebuf <- WriteBufItem[K, V]{entry: entry, code: NEW}
+ }
+ if s.OnRemoval != nil {
+ for _, kv := range evictedkv {
+ s.OnRemoval(kv.k, kv.v, EVICTED)
+ }
+ for _, kv := range expiredkv {
+ s.OnRemoval(kv.k, kv.v, EXPIRED)
+ }
+ }
+}
+
+func (s *Store[K, V]) Delete(key K) {
+ _, index := s.index(key)
+ shard := s.shards[index]
+ shard.mu.Lock()
+ entry, ok := shard.get(key)
+ if ok {
+ shard.delete(entry)
+ }
+ shard.mu.Unlock()
+ if ok {
+ s.writebuf <- WriteBufItem[K, V]{entry: entry, code: REMOVE}
+ }
+}
+
+func (s *Store[K, V]) Len() int {
+ total := 0
+ for _, s := range s.shards {
+ s.mu.RLock()
+ total += s.len()
+ s.mu.RUnlock()
+ }
+ return total
+}
+
+func (s *Store[K, V]) UsedCapacity() int {
+ total := 0
+ for _, s := range s.shards {
+ s.mu.RLock()
+ total += s.qlen
+ s.mu.RUnlock()
+ }
+ return total
+}
+
+func (s *Store[K, V]) MaxCapacity() int {
+ return int(s.cap)
+}
+
+// spread the hash before computing the shard index
+func (s *Store[K, V]) index(key K) (uint64, int) {
+ h0, h1 := key.Hash2()
+ return h0, int(h1 & uint64(s.shardCount-1))
+}
+
+func (s *Store[K, V]) postDelete(entry *Entry[K, V]) {
+ var zero V
+ entry.value = zero
+ s.entryPool.Put(entry)
+}
+
+// remove entry from cache/policy/timingwheel and add back to pool
+func (s *Store[K, V]) removeEntry(entry *Entry[K, V], reason RemoveReason) {
+ if prev := entry.meta.prev; prev != nil {
+ s.policy.Remove(entry)
+ }
+ switch reason {
+ case EVICTED, EXPIRED:
+ _, index := s.index(entry.key)
+ shard := s.shards[index]
+ shard.mu.Lock()
+ deleted := shard.delete(entry)
+ shard.mu.Unlock()
+ if deleted {
+ if s.OnRemoval != nil {
+ s.OnRemoval(entry.key, entry.value, reason)
+ }
+ s.postDelete(entry)
+ s.Metrics.evicted.Add(1)
+ }
+ case REMOVED:
+ // already removed from shard map
+ if s.OnRemoval != nil {
+ s.OnRemoval(entry.key, entry.value, reason)
+ }
+ }
+}
+
+func (s *Store[K, V]) drainRead() {
+ s.policy.total.Add(MaxReadBuffSize)
+ s.mlock.Lock()
+ for {
+ v, ok := s.readbuf.Pop()
+ if !ok {
+ break
+ }
+ s.policy.Access(v)
+ }
+ s.mlock.Unlock()
+ s.readCounter.Store(0)
+}
+
+func (s *Store[K, V]) maintenanceItem(item WriteBufItem[K, V]) {
+ s.mlock.Lock()
+ defer s.mlock.Unlock()
+
+ entry := item.entry
+ if entry == nil {
+ return
+ }
+
+ // lock-free because the store API never reads/modifies entry metadata
+ switch item.code {
+ case NEW:
+ if entry.removed {
+ return
+ }
+ evicted := s.policy.Set(entry)
+ if evicted != nil {
+ s.removeEntry(evicted, EVICTED)
+ s.tailUpdate = true
+ }
+ removed := s.policy.EvictEntries()
+ for _, e := range removed {
+ s.tailUpdate = true
+ s.removeEntry(e, EVICTED)
+ }
+ case REMOVE:
+ entry.removed = true
+ s.removeEntry(entry, REMOVED)
+ s.policy.threshold.Store(-1)
+ case UPDATE:
+ if item.costChange != 0 {
+ s.policy.UpdateCost(entry, item.costChange)
+ removed := s.policy.EvictEntries()
+ for _, e := range removed {
+ s.tailUpdate = true
+ s.removeEntry(e, EVICTED)
+ }
+ }
+ }
+ item.entry = nil
+ if s.tailUpdate {
+ s.policy.UpdateThreshold()
+ s.tailUpdate = false
+ }
+}
+
+func (s *Store[K, V]) maintenance() {
+ tick := time.NewTicker(500 * time.Millisecond)
+ defer tick.Stop()
+
+ for {
+ select {
+ case <-tick.C:
+ s.mlock.Lock()
+ s.policy.UpdateThreshold()
+ s.mlock.Unlock()
+
+ case item, ok := <-s.writebuf:
+ if !ok {
+ return
+ }
+ s.maintenanceItem(item)
+ }
+ }
+}
+
+func (s *Store[K, V]) Range(epoch uint32, f func(key K, value V) bool) {
+ for _, shard := range s.shards {
+ shard.mu.RLock()
+ for _, entry := range shard.hashmap {
+ if entry.epoch.Load() < epoch {
+ continue
+ }
+ if !f(entry.key, entry.value) {
+ shard.mu.RUnlock()
+ return
+ }
+ }
+ shard.mu.RUnlock()
+ }
+}
+
+func (s *Store[K, V]) Close() {
+ if !s.open.Swap(false) {
+ panic("theine.Store: double close")
+ }
+
+ for _, s := range s.shards {
+ s.mu.Lock()
+ clear(s.hashmap)
+ s.mu.Unlock()
+ }
+ close(s.writebuf)
+}
diff --git a/go/cache/theine/store_test.go b/go/cache/theine/store_test.go
new file mode 100644
index 00000000000..880acf30193
--- /dev/null
+++ b/go/cache/theine/store_test.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2023 The Vitess Authors.
+Copyright 2023 Yiling-J
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package theine
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+type cachedint int
+
+func (ci cachedint) CachedSize(bool) int64 {
+ return 1
+}
+
+type keyint int
+
+func (k keyint) Hash() uint64 {
+ return uint64(k)
+}
+
+func (k keyint) Hash2() (uint64, uint64) {
+ return uint64(k), uint64(k) * 333
+}
+
+func TestProcessDeque(t *testing.T) {
+ store := NewStore[keyint, cachedint](20000, false)
+
+ evicted := map[keyint]cachedint{}
+ store.OnRemoval = func(key keyint, value cachedint, reason RemoveReason) {
+ if reason == EVICTED {
+ evicted[key] = value
+ }
+ }
+ _, index := store.index(123)
+ shard := store.shards[index]
+ shard.qsize = 10
+
+ for i := keyint(0); i < 5; i++ {
+ entry := &Entry[keyint, cachedint]{key: i}
+ entry.cost.Store(1)
+ store.shards[index].deque.PushFront(entry)
+ store.shards[index].qlen += 1
+ store.shards[index].hashmap[i] = entry
+ }
+
+ // move 0,1,2 entries to slru
+ store.Set(123, 123, 8, 0)
+ require.Equal(t, store.shards[index].deque.Len(), 3)
+ var keys []keyint
+ for store.shards[index].deque.Len() != 0 {
+ e := store.shards[index].deque.PopBack()
+ keys = append(keys, e.key)
+ }
+ require.Equal(t, []keyint{3, 4, 123}, keys)
+}
+
+func TestDoorKeeperDynamicSize(t *testing.T) {
+ store := NewStore[keyint, cachedint](200000, true)
+ shard := store.shards[0]
+ require.True(t, shard.dookeeper.Capacity == 512)
+ for i := keyint(0); i < 5000; i++ {
+ shard.set(i, &Entry[keyint, cachedint]{})
+ }
+ require.True(t, shard.dookeeper.Capacity > 100000)
+}
diff --git a/go/cache/theine/tlfu.go b/go/cache/theine/tlfu.go
new file mode 100644
index 00000000000..f7a4f8dec51
--- /dev/null
+++ b/go/cache/theine/tlfu.go
@@ -0,0 +1,197 @@
+/*
+Copyright 2023 The Vitess Authors.
+Copyright 2023 Yiling-J
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package theine
+
+import (
+ "sync/atomic"
+)
+
+type TinyLfu[K cachekey, V any] struct {
+ slru *Slru[K, V]
+ sketch *CountMinSketch
+ size uint
+ counter uint
+ total atomic.Uint32
+ hit atomic.Uint32
+ hr float32
+ threshold atomic.Int32
+ lruFactor uint8
+ step int8
+}
+
+func NewTinyLfu[K cachekey, V any](size uint) *TinyLfu[K, V] {
+ tlfu := &TinyLfu[K, V]{
+ size: size,
+ slru: NewSlru[K, V](size),
+ sketch: NewCountMinSketch(),
+ step: 1,
+ }
+ // default threshold to -1 so all entries are admitted until cache is full
+ tlfu.threshold.Store(-1)
+ return tlfu
+}
+
+func (t *TinyLfu[K, V]) climb() {
+ total := t.total.Load()
+ hit := t.hit.Load()
+ current := float32(hit) / float32(total)
+ delta := current - t.hr
+ var diff int8
+ if delta > 0.0 {
+ if t.step < 0 {
+ t.step -= 1
+ } else {
+ t.step += 1
+ }
+ if t.step < -13 {
+ t.step = -13
+ } else if t.step > 13 {
+ t.step = 13
+ }
+ newFactor := int8(t.lruFactor) + t.step
+ if newFactor < 0 {
+ newFactor = 0
+ } else if newFactor > 16 {
+ newFactor = 16
+ }
+ diff = newFactor - int8(t.lruFactor)
+ t.lruFactor = uint8(newFactor)
+ } else if delta < 0.0 {
+ // reset
+ if t.step > 0 {
+ t.step = -1
+ } else {
+ t.step = 1
+ }
+ newFactor := int8(t.lruFactor) + t.step
+ if newFactor < 0 {
+ newFactor = 0
+ } else if newFactor > 16 {
+ newFactor = 16
+ }
+ diff = newFactor - int8(t.lruFactor)
+ t.lruFactor = uint8(newFactor)
+ }
+ t.threshold.Add(-int32(diff))
+ t.hr = current
+ t.hit.Store(0)
+ t.total.Store(0)
+}
+
+func (t *TinyLfu[K, V]) Set(entry *Entry[K, V]) *Entry[K, V] {
+ t.counter++
+ if t.counter > 10*t.size {
+ t.climb()
+ t.counter = 0
+ }
+ if entry.meta.prev == nil {
+ if victim := t.slru.victim(); victim != nil {
+ freq := int(entry.frequency.Load())
+ if freq == -1 {
+ freq = int(t.sketch.Estimate(entry.key.Hash()))
+ }
+ evictedCount := uint(freq) + uint(t.lruFactor)
+ victimCount := t.sketch.Estimate(victim.key.Hash())
+ if evictedCount <= uint(victimCount) {
+ return entry
+ }
+ } else {
+ count := t.slru.probation.count + t.slru.protected.count
+ t.sketch.EnsureCapacity(uint(count + count/100))
+ }
+ evicted := t.slru.insert(entry)
+ return evicted
+ }
+
+ return nil
+}
+
+func (t *TinyLfu[K, V]) Access(item ReadBufItem[K, V]) {
+ t.counter++
+ if t.counter > 10*t.size {
+ t.climb()
+ t.counter = 0
+ }
+ if entry := item.entry; entry != nil {
+ reset := t.sketch.Add(item.hash)
+ if reset {
+ t.threshold.Store(t.threshold.Load() / 2)
+ }
+ if entry.meta.prev != nil {
+ var tail bool
+ if entry == t.slru.victim() {
+ tail = true
+ }
+ t.slru.access(entry)
+ if tail {
+ t.UpdateThreshold()
+ }
+ } else {
+ entry.frequency.Store(int32(t.sketch.Estimate(item.hash)))
+ }
+ } else {
+ reset := t.sketch.Add(item.hash)
+ if reset {
+ t.threshold.Store(t.threshold.Load() / 2)
+ }
+ }
+}
+
+func (t *TinyLfu[K, V]) Remove(entry *Entry[K, V]) {
+ t.slru.remove(entry)
+}
+
+func (t *TinyLfu[K, V]) UpdateCost(entry *Entry[K, V], delta int64) {
+ t.slru.updateCost(entry, delta)
+}
+
+func (t *TinyLfu[K, V]) EvictEntries() []*Entry[K, V] {
+ removed := []*Entry[K, V]{}
+
+ for t.slru.probation.Len()+t.slru.protected.Len() > int(t.slru.maxsize) {
+ entry := t.slru.probation.PopTail()
+ if entry == nil {
+ break
+ }
+ removed = append(removed, entry)
+ }
+ for t.slru.probation.Len()+t.slru.protected.Len() > int(t.slru.maxsize) {
+ entry := t.slru.protected.PopTail()
+ if entry == nil {
+ break
+ }
+ removed = append(removed, entry)
+ }
+ return removed
+}
+
+func (t *TinyLfu[K, V]) UpdateThreshold() {
+ if t.slru.probation.Len()+t.slru.protected.Len() < int(t.slru.maxsize) {
+ t.threshold.Store(-1)
+ } else {
+ tail := t.slru.victim()
+ if tail != nil {
+ t.threshold.Store(
+ int32(t.sketch.Estimate(tail.key.Hash()) - uint(t.lruFactor)),
+ )
+ } else {
+ // cache is not full
+ t.threshold.Store(-1)
+ }
+ }
+}
diff --git a/go/cache/theine/tlfu_test.go b/go/cache/theine/tlfu_test.go
new file mode 100644
index 00000000000..ac6ddaabdb6
--- /dev/null
+++ b/go/cache/theine/tlfu_test.go
@@ -0,0 +1,156 @@
+/*
+Copyright 2023 The Vitess Authors.
+Copyright 2023 Yiling-J
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package theine
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestTlfu(t *testing.T) {
+ tlfu := NewTinyLfu[StringKey, string](1000)
+ require.Equal(t, uint(1000), tlfu.slru.probation.capacity)
+ require.Equal(t, uint(800), tlfu.slru.protected.capacity)
+ require.Equal(t, 0, tlfu.slru.probation.len)
+ require.Equal(t, 0, tlfu.slru.protected.len)
+
+ var entries []*Entry[StringKey, string]
+ for i := 0; i < 200; i++ {
+ e := NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 1)
+ evicted := tlfu.Set(e)
+ entries = append(entries, e)
+ require.Nil(t, evicted)
+ }
+
+ require.Equal(t, 200, tlfu.slru.probation.len)
+ require.Equal(t, 0, tlfu.slru.protected.len)
+
+ // probation -> protected
+ tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[11]})
+ require.Equal(t, 199, tlfu.slru.probation.len)
+ require.Equal(t, 1, tlfu.slru.protected.len)
+ tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[11]})
+ require.Equal(t, 199, tlfu.slru.probation.len)
+ require.Equal(t, 1, tlfu.slru.protected.len)
+
+ for i := 200; i < 1000; i++ {
+ e := NewEntry(StringKey(fmt.Sprintf("%d", i)), "", 1)
+ entries = append(entries, e)
+ evicted := tlfu.Set(e)
+ require.Nil(t, evicted)
+ }
+ // access protected
+ tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[11]})
+ require.Equal(t, 999, tlfu.slru.probation.len)
+ require.Equal(t, 1, tlfu.slru.protected.len)
+
+ evicted := tlfu.Set(NewEntry(StringKey("0a"), "", 1))
+ require.Equal(t, StringKey("0a"), evicted.key)
+ require.Equal(t, 999, tlfu.slru.probation.len)
+ require.Equal(t, 1, tlfu.slru.protected.len)
+
+ victim := tlfu.slru.victim()
+ require.Equal(t, StringKey("0"), victim.key)
+ tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[991]})
+ tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[991]})
+ tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[991]})
+ tlfu.Access(ReadBufItem[StringKey, string]{entry: entries[991]})
+ evicted = tlfu.Set(NewEntry(StringKey("1a"), "", 1))
+ require.Equal(t, StringKey("1a"), evicted.key)
+ require.Equal(t, 998, tlfu.slru.probation.len)
+
+ var entries2 []*Entry[StringKey, string]
+ for i := 0; i < 1000; i++ {
+ e := NewEntry(StringKey(fmt.Sprintf("%d*", i)), "", 1)
+ tlfu.Set(e)
+ entries2 = append(entries2, e)
+ }
+ require.Equal(t, 998, tlfu.slru.probation.len)
+ require.Equal(t, 2, tlfu.slru.protected.len)
+
+ for _, i := range []int{997, 998, 999} {
+ tlfu.Remove(entries2[i])
+ tlfu.slru.probation.display()
+ tlfu.slru.probation.displayReverse()
+ tlfu.slru.protected.display()
+ tlfu.slru.protected.displayReverse()
+ }
+
+}
+
+func TestEvictEntries(t *testing.T) {
+ tlfu := NewTinyLfu[StringKey, string](500)
+ require.Equal(t, uint(500), tlfu.slru.probation.capacity)
+ require.Equal(t, uint(400), tlfu.slru.protected.capacity)
+ require.Equal(t, 0, tlfu.slru.probation.len)
+ require.Equal(t, 0, tlfu.slru.protected.len)
+
+ for i := 0; i < 500; i++ {
+ tlfu.Set(NewEntry(StringKey(fmt.Sprintf("%d:1", i)), "", 1))
+ }
+ require.Equal(t, 500, tlfu.slru.probation.len)
+ require.Equal(t, 0, tlfu.slru.protected.len)
+ new := NewEntry(StringKey("l:10"), "", 10)
+ new.frequency.Store(10)
+ tlfu.Set(new)
+ require.Equal(t, 509, tlfu.slru.probation.len)
+ require.Equal(t, 0, tlfu.slru.protected.len)
+ // 2. probation length is 509, so remove 9 entries from probation
+ removed := tlfu.EvictEntries()
+ for _, rm := range removed {
+ require.True(t, strings.HasSuffix(string(rm.key), ":1"))
+ }
+ require.Equal(t, 9, len(removed))
+ require.Equal(t, 500, tlfu.slru.probation.len)
+ require.Equal(t, 0, tlfu.slru.protected.len)
+
+ // put l:450 to probation, this will remove 1 entry, probation len is 949 now
+ // remove 449 entries from probation
+ new = NewEntry(StringKey("l:450"), "", 450)
+ new.frequency.Store(10)
+ tlfu.Set(new)
+ removed = tlfu.EvictEntries()
+ require.Equal(t, 449, len(removed))
+ require.Equal(t, 500, tlfu.slru.probation.len)
+ require.Equal(t, 0, tlfu.slru.protected.len)
+
+ // put l:460 to probation, this will remove 1 entry, probation len is 959 now
+ // remove all entries except the new l:460 one
+ new = NewEntry(StringKey("l:460"), "", 460)
+ new.frequency.Store(10)
+ tlfu.Set(new)
+ removed = tlfu.EvictEntries()
+ require.Equal(t, 41, len(removed))
+ require.Equal(t, 460, tlfu.slru.probation.len)
+ require.Equal(t, 0, tlfu.slru.protected.len)
+
+ // access
+ tlfu.Access(ReadBufItem[StringKey, string]{entry: new})
+ require.Equal(t, 0, tlfu.slru.probation.len)
+ require.Equal(t, 460, tlfu.slru.protected.len)
+ new.cost.Store(600)
+ tlfu.UpdateCost(new, 140)
+ removed = tlfu.EvictEntries()
+ require.Equal(t, 1, len(removed))
+ require.Equal(t, 0, tlfu.slru.probation.len)
+ require.Equal(t, 0, tlfu.slru.protected.len)
+
+}
diff --git a/go/cmd/internal/docgen/docgen.go b/go/cmd/internal/docgen/docgen.go
index 6fe461e5af7..f52042e80af 100644
--- a/go/cmd/internal/docgen/docgen.go
+++ b/go/cmd/internal/docgen/docgen.go
@@ -46,8 +46,10 @@ import (
"fmt"
"io/fs"
"os"
+ "os/exec"
"path/filepath"
"strings"
+ "sync"
"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
@@ -57,6 +59,10 @@ import (
// written to `dir`. The root command is also renamed to _index.md to remain
// compatible with the vitessio/website content structure expectations.
func GenerateMarkdownTree(cmd *cobra.Command, dir string) error {
+ sha, err := getCommitID("HEAD")
+ if err != nil {
+ return fmt.Errorf("failed to get commit id for HEAD: %w", err)
+ }
switch fi, err := os.Stat(dir); {
case errors.Is(err, fs.ErrNotExist):
if err := os.MkdirAll(dir, 0755); err != nil {
@@ -69,7 +75,7 @@ func GenerateMarkdownTree(cmd *cobra.Command, dir string) error {
}
recursivelyDisableAutoGenTags(cmd)
- if err := doc.GenMarkdownTreeCustom(cmd, dir, frontmatterFilePrepender, linkHandler); err != nil {
+ if err := doc.GenMarkdownTreeCustom(cmd, dir, frontmatterFilePrepender(sha), linkHandler); err != nil {
return err
}
@@ -79,6 +85,120 @@ func GenerateMarkdownTree(cmd *cobra.Command, dir string) error {
return fmt.Errorf("failed to index doc (generated at %s) into proper position (%s): %w", rootDocPath, indexDocPath, err)
}
+ if err := anonymizeHomedir(indexDocPath); err != nil {
+ return fmt.Errorf("failed to anonymize homedir in help text for command %s: %w", indexDocPath, err)
+ }
+
+ if err := restructure(dir, dir, cmd.Name(), cmd.Commands()); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+/*
+_index.md (aka vtctldclient.md)
+vtctldclient_AddCellInfo.md
+vtctldclient_movetables.md
+vtctldclient_movetables_show.md
+
+becomes
+
+_index.md
+vtctldclient_AddCellInfo.md
+vtctldclient_movetables/
+ _index.md
+ vtctldclient_movetables_show.md
+*/
+
+func restructure(rootDir string, dir string, name string, commands []*cobra.Command) error {
+ for _, cmd := range commands {
+ fullCmdFilename := strings.Join([]string{name, cmd.Name()}, "_")
+
+ children := cmd.Commands()
+
+ switch {
+ case len(children) > 0:
+ // Command (top-level or not) with children.
+ // 1. Set up a directory for its children.
+ // 2. Move its doc into that dir as "_index.md"
+ // 3. Restructure its children.
+ cmdDir := filepath.Join(dir, fullCmdFilename)
+ if err := os.MkdirAll(cmdDir, 0755); err != nil {
+ return fmt.Errorf("failed to create subdir for %s: %w", fullCmdFilename, err)
+ }
+
+ indexFile := filepath.Join(cmdDir, "_index.md")
+ if err := os.Rename(filepath.Join(rootDir, fullCmdFilename+".md"), indexFile); err != nil {
+ return fmt.Errorf("failed to move index doc for command %s with children: %w", fullCmdFilename, err)
+ }
+
+ if err := anonymizeHomedir(indexFile); err != nil {
+ return fmt.Errorf("failed to anonymize homedir in help text for command %s: %w", indexFile, err)
+ }
+
+ if err := restructure(rootDir, cmdDir, fullCmdFilename, children); err != nil {
+ return fmt.Errorf("failed to restructure child commands for %s: %w", fullCmdFilename, err)
+ }
+ case rootDir != dir:
+ // Sub-command without children.
+ // 1. Move its doc into the directory for its parent, name unchanged.
+ if cmd.Name() == "help" {
+ // all commands with children have their own "help" subcommand,
+ // which we do not generate docs for
+ continue
+ }
+
+ oldName := filepath.Join(rootDir, fullCmdFilename+".md")
+ newName := filepath.Join(dir, fullCmdFilename+".md")
+
+ if err := os.Rename(oldName, newName); err != nil {
+ return fmt.Errorf("failed to move child command %s to its parent's dir: %w", fullCmdFilename, err)
+ }
+
+ sed := newParentLinkSedCommand(name, newName)
+ if out, err := sed.CombinedOutput(); err != nil {
+ return fmt.Errorf("failed to rewrite links to parent command in child %s: %w (extra: %s)", newName, err, out)
+ }
+
+ if err := anonymizeHomedir(newName); err != nil {
+ return fmt.Errorf("failed to anonymize homedir in help text for command %s: %w", newName, err)
+ }
+ default:
+ // Top-level command without children. Nothing to restructure.
+ continue
+ }
+ }
+
+ return nil
+}
+
+func newParentLinkSedCommand(parent string, file string) *exec.Cmd {
+ return exec.Command("sed", "-i", "", "-e", fmt.Sprintf("s:(./%s/):(../):i", parent), file)
+}
+
+var (
+ wd string
+ once sync.Once
+)
+
+func anonymizeHomedir(file string) (err error) {
+ once.Do(func() {
+ // Only do this once per run.
+ wd, err = os.Getwd()
+ })
+ if err != nil {
+ return err
+ }
+
+ // We're replacing the stuff inside the square brackets in the example sed
+ // below:
+ // 's:Paths to search for config files in. (default \[.*\])$:Paths to search for config files in. (default \[\]):'
+ sed := exec.Command("sed", "-i", "", "-e", fmt.Sprintf("s:%s::i", wd), file)
+ if out, err := sed.CombinedOutput(); err != nil {
+ return fmt.Errorf("%w: %s", err, out)
+ }
+
return nil
}
@@ -91,31 +211,47 @@ func recursivelyDisableAutoGenTags(root *cobra.Command) {
}
}
+func getCommitID(ref string) (string, error) {
+ gitShow := exec.Command("git", "show", "--pretty=format:%H", "--no-patch", ref)
+ out, err := gitShow.Output()
+ if err != nil {
+ return "", err
+ }
+
+ return string(out), nil
+}
+
const frontmatter = `---
title: %s
series: %s
+commit: %s
---
`
-func frontmatterFilePrepender(filename string) string {
- name := filepath.Base(filename)
- base := strings.TrimSuffix(name, filepath.Ext(name))
+func frontmatterFilePrepender(sha string) func(filename string) string {
+ return func(filename string) string {
+ name := filepath.Base(filename)
+ base := strings.TrimSuffix(name, filepath.Ext(name))
- root, cmdName, ok := strings.Cut(base, "_")
- if !ok { // no `_`, so not a subcommand
- cmdName = root
- }
+ root, cmdName, ok := strings.Cut(base, "_")
+ if !ok { // no `_`, so not a subcommand
+ cmdName = root
+ }
- return fmt.Sprintf(frontmatter, cmdName, root)
+ cmdName = strings.ReplaceAll(cmdName, "_", " ")
+
+ return fmt.Sprintf(frontmatter, cmdName, root, sha)
+ }
}
func linkHandler(filename string) string {
- name := filepath.Base(filename)
- base := strings.TrimSuffix(name, filepath.Ext(name))
+ base := filepath.Base(filename)
+ name := strings.TrimSuffix(base, filepath.Ext(base))
- if _, _, ok := strings.Cut(base, "_"); !ok {
+ _, _, ok := strings.Cut(name, "_")
+ if !ok {
return "../"
}
- return fmt.Sprintf("./%s/", strings.ToLower(base))
+ return fmt.Sprintf("./%s/", strings.ToLower(name))
}
diff --git a/go/cmd/mysqlctl/command/init.go b/go/cmd/mysqlctl/command/init.go
new file mode 100644
index 00000000000..71a9661aa80
--- /dev/null
+++ b/go/cmd/mysqlctl/command/init.go
@@ -0,0 +1,71 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/vt/mysqlctl"
+)
+
+var Init = &cobra.Command{
+ Use: "init",
+ Short: "Initializes the directory structure and starts mysqld.",
+ Long: "Bootstraps a new `mysqld` instance, initializes its data directory, and starts the instance.\n" +
+ "The MySQL version and flavor will be auto-detected, with a minimal configuration file applied.",
+ Example: `mysqlctl \
+ --alsologtostderr \
+ --tablet_uid 101 \
+ --mysql_port 12345 \
+ init`,
+ Args: cobra.NoArgs,
+ RunE: commandInit,
+}
+
+var initArgs = struct {
+ WaitTime time.Duration
+ InitDbSQLFile string
+}{
+ WaitTime: 5 * time.Minute,
+}
+
+func commandInit(cmd *cobra.Command, args []string) error {
+ // Generate my.cnf from scratch and use it to find mysqld.
+ mysqld, cnf, err := mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort)
+ if err != nil {
+ return fmt.Errorf("failed to initialize mysql config: %v", err)
+ }
+ defer mysqld.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), initArgs.WaitTime)
+ defer cancel()
+ if err := mysqld.Init(ctx, cnf, initArgs.InitDbSQLFile); err != nil {
+ return fmt.Errorf("failed init mysql: %v", err)
+ }
+ return nil
+}
+
+func init() {
+ Init.Flags().DurationVar(&initArgs.WaitTime, "wait_time", initArgs.WaitTime, "How long to wait for mysqld startup.")
+ Init.Flags().StringVar(&initArgs.InitDbSQLFile, "init_db_sql_file", initArgs.InitDbSQLFile, "Path to .sql file to run after mysqld initialization.")
+
+ Root.AddCommand(Init)
+}
diff --git a/go/cmd/mysqlctl/command/init_config.go b/go/cmd/mysqlctl/command/init_config.go
new file mode 100644
index 00000000000..70e751e02cb
--- /dev/null
+++ b/go/cmd/mysqlctl/command/init_config.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/vt/mysqlctl"
+)
+
+var InitConfig = &cobra.Command{
+ Use: "init_config",
+ Short: "Initializes the directory structure, creates my.cnf file, but does not start mysqld.",
+ Long: "Bootstraps the configuration for a new `mysqld` instance and initializes its data directory.\n" +
+ "This command is the same as `init` except the `mysqld` server will not be started.",
+ Example: `mysqlctl \
+ --alsologtostderr \
+ --tablet_uid 101 \
+ --mysql_port 12345 \
+ init_config`,
+ Args: cobra.NoArgs,
+ RunE: commandInitConfig,
+}
+
+func commandInitConfig(cmd *cobra.Command, args []string) error {
+ // Generate my.cnf from scratch and use it to find mysqld.
+ mysqld, cnf, err := mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort)
+ if err != nil {
+ return fmt.Errorf("failed to initialize mysql config: %v", err)
+ }
+ defer mysqld.Close()
+ if err := mysqld.InitConfig(cnf); err != nil {
+ return fmt.Errorf("failed to init mysql config: %v", err)
+ }
+
+ return nil
+}
+
+func init() {
+ Root.AddCommand(InitConfig)
+}
diff --git a/go/cmd/mysqlctl/plugin_prometheusbackend.go b/go/cmd/mysqlctl/command/plugin_prometheusbackend.go
similarity index 98%
rename from go/cmd/mysqlctl/plugin_prometheusbackend.go
rename to go/cmd/mysqlctl/command/plugin_prometheusbackend.go
index 62853982f11..7376af743a4 100644
--- a/go/cmd/mysqlctl/plugin_prometheusbackend.go
+++ b/go/cmd/mysqlctl/command/plugin_prometheusbackend.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package command
// This plugin imports Prometheus to allow for instrumentation
// with the Prometheus client library
diff --git a/go/cmd/mysqlctl/command/position.go b/go/cmd/mysqlctl/command/position.go
new file mode 100644
index 00000000000..46f848e1bbb
--- /dev/null
+++ b/go/cmd/mysqlctl/command/position.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/mysql/replication"
+)
+
+var Position = &cobra.Command{
+ Use: "position <operation> <pos1> <pos2>",
+ Short: "Compute operations on replication positions",
+ Args: cobra.MatchAll(cobra.ExactArgs(3), func(cmd *cobra.Command, args []string) error {
+ switch args[0] {
+ case "equal", "at_least", "append":
+ default:
+ return fmt.Errorf("invalid operation %s (choices are 'equal', 'at_least', 'append')", args[0])
+ }
+
+ return nil
+ }),
+ RunE: commandPosition,
+}
+
+func commandPosition(cmd *cobra.Command, args []string) error {
+ pos1, err := replication.DecodePosition(args[1])
+ if err != nil {
+ return err
+ }
+
+ switch args[0] {
+ case "equal":
+ pos2, err := replication.DecodePosition(args[2])
+ if err != nil {
+ return err
+ }
+ fmt.Println(pos1.Equal(pos2))
+ case "at_least":
+ pos2, err := replication.DecodePosition(args[2])
+ if err != nil {
+ return err
+ }
+ fmt.Println(pos1.AtLeast(pos2))
+ case "append":
+ gtid, err := replication.DecodeGTID(args[2])
+ if err != nil {
+ return err
+ }
+ fmt.Println(replication.AppendGTID(pos1, gtid))
+ }
+
+ return nil
+}
+
+func init() {
+ Root.AddCommand(Position)
+}
diff --git a/go/cmd/mysqlctl/command/reinit_config.go b/go/cmd/mysqlctl/command/reinit_config.go
new file mode 100644
index 00000000000..b06642c8203
--- /dev/null
+++ b/go/cmd/mysqlctl/command/reinit_config.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/vt/mysqlctl"
+)
+
+var ReinitConfig = &cobra.Command{
+ Use: "reinit_config",
+ Short: "Reinitializes my.cnf file with new server_id.",
+ Long: "Regenerate new configuration files for an existing `mysqld` instance (generating new server_id and server_uuid values).\n" +
+ "This could be helpful to revert configuration changes, or to pick up changes made to the bundled config in newer Vitess versions.",
+ Example: `mysqlctl \
+ --alsologtostderr \
+ --tablet_uid 101 \
+ --mysql_port 12345 \
+ reinit_config`,
+ Args: cobra.NoArgs,
+ RunE: commandReinitConfig,
+}
+
+func commandReinitConfig(cmd *cobra.Command, args []string) error {
+ // There ought to be an existing my.cnf, so use it to find mysqld.
+ mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID)
+ if err != nil {
+ return fmt.Errorf("failed to find mysql config: %v", err)
+ }
+ defer mysqld.Close()
+
+ if err := mysqld.ReinitConfig(context.TODO(), cnf); err != nil {
+ return fmt.Errorf("failed to reinit mysql config: %v", err)
+ }
+ return nil
+}
+
+func init() {
+ Root.AddCommand(ReinitConfig)
+}
diff --git a/go/cmd/mysqlctl/command/root.go b/go/cmd/mysqlctl/command/root.go
new file mode 100644
index 00000000000..4f5626ef7e6
--- /dev/null
+++ b/go/cmd/mysqlctl/command/root.go
@@ -0,0 +1,77 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/acl"
+ vtcmd "vitess.io/vitess/go/cmd"
+ "vitess.io/vitess/go/vt/dbconfigs"
+ "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/servenv"
+)
+
+var (
+ mysqlPort = 3306
+ tabletUID = uint32(41983)
+ mysqlSocket string
+
+ Root = &cobra.Command{
+ Use: "mysqlctl",
+ Short: "mysqlctl initializes and controls mysqld with Vitess-specific configuration.",
+ Long: "`mysqlctl` is a command-line client used for managing `mysqld` instances.\n\n" +
+
+ "It is responsible for bootstrapping tasks such as generating a configuration file for `mysqld` and initializing the instance and its data directory.\n" +
+ "The `mysqld_safe` watchdog is utilized when present.\n" +
+ "This helps ensure that `mysqld` is automatically restarted after failures.",
+ PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
+ if err := servenv.CobraPreRunE(cmd, args); err != nil {
+				return err
+ }
+
+ if vtcmd.IsRunningAsRoot() {
+ return fmt.Errorf("mysqlctl cannot be run as root. Please run as a different user")
+ }
+
+ return nil
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ logutil.Flush()
+ },
+ Version: servenv.AppVersion.String(),
+ }
+)
+
+func init() {
+ servenv.RegisterDefaultSocketFileFlags()
+ servenv.RegisterFlags()
+ servenv.RegisterServiceMapFlag()
+
+ // mysqlctl only starts and stops mysql, only needs dba.
+ dbconfigs.RegisterFlags(dbconfigs.Dba)
+
+ servenv.MovePersistentFlagsToCobraCommand(Root)
+
+ Root.PersistentFlags().IntVar(&mysqlPort, "mysql_port", mysqlPort, "MySQL port.")
+ Root.PersistentFlags().Uint32Var(&tabletUID, "tablet_uid", tabletUID, "Tablet UID.")
+ Root.PersistentFlags().StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "Path to the mysqld socket file.")
+
+ acl.RegisterFlags(Root.PersistentFlags())
+}
diff --git a/go/cmd/mysqlctl/command/shutdown.go b/go/cmd/mysqlctl/command/shutdown.go
new file mode 100644
index 00000000000..41c804856eb
--- /dev/null
+++ b/go/cmd/mysqlctl/command/shutdown.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/vt/mysqlctl"
+)
+
+var Shutdown = &cobra.Command{
+ Use: "shutdown",
+ Short: "Shuts down mysqld, without removing any files.",
+ Long: "Stop a `mysqld` instance that was previously started with `init` or `start`.\n\n" +
+
+ "For large `mysqld` instances, you may need to extend the `wait_time` to shutdown cleanly.",
+ Example: `mysqlctl --tablet_uid 101 --alsologtostderr shutdown`,
+ Args: cobra.NoArgs,
+ RunE: commandShutdown,
+}
+
+var shutdownArgs = struct {
+ WaitTime time.Duration
+}{
+ WaitTime: 5 * time.Minute,
+}
+
+func commandShutdown(cmd *cobra.Command, args []string) error {
+ // There ought to be an existing my.cnf, so use it to find mysqld.
+ mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID)
+ if err != nil {
+ return fmt.Errorf("failed to find mysql config: %v", err)
+ }
+ defer mysqld.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), shutdownArgs.WaitTime)
+ defer cancel()
+ if err := mysqld.Shutdown(ctx, cnf, true); err != nil {
+ return fmt.Errorf("failed shutdown mysql: %v", err)
+ }
+ return nil
+}
+
+func init() {
+ Shutdown.Flags().DurationVar(&shutdownArgs.WaitTime, "wait_time", shutdownArgs.WaitTime, "How long to wait for mysqld shutdown.")
+
+ Root.AddCommand(Shutdown)
+}
diff --git a/go/cmd/mysqlctl/command/start.go b/go/cmd/mysqlctl/command/start.go
new file mode 100644
index 00000000000..397909e0966
--- /dev/null
+++ b/go/cmd/mysqlctl/command/start.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/flagutil"
+ "vitess.io/vitess/go/vt/mysqlctl"
+)
+
+var Start = &cobra.Command{
+ Use: "start",
+ Short: "Starts mysqld on an already 'init'-ed directory.",
+ Long: "Resume an existing `mysqld` instance that was previously bootstrapped with `init` or `init_config`",
+ Example: `mysqlctl --tablet_uid 101 --alsologtostderr start`,
+ Args: cobra.NoArgs,
+ RunE: commandStart,
+}
+
+var startArgs = struct {
+ WaitTime time.Duration
+ MySQLdArgs flagutil.StringListValue
+}{
+ WaitTime: 5 * time.Minute,
+}
+
+func commandStart(cmd *cobra.Command, args []string) error {
+ // There ought to be an existing my.cnf, so use it to find mysqld.
+ mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID)
+ if err != nil {
+ return fmt.Errorf("failed to find mysql config: %v", err)
+ }
+ defer mysqld.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), startArgs.WaitTime)
+ defer cancel()
+ if err := mysqld.Start(ctx, cnf, startArgs.MySQLdArgs...); err != nil {
+ return fmt.Errorf("failed start mysql: %v", err)
+ }
+ return nil
+}
+
+func init() {
+ Start.Flags().DurationVar(&startArgs.WaitTime, "wait_time", startArgs.WaitTime, "How long to wait for mysqld startup.")
+ Start.Flags().Var(&startArgs.MySQLdArgs, "mysqld_args", "List of comma-separated flags to pass additionally to mysqld.")
+
+ Root.AddCommand(Start)
+}
diff --git a/go/cmd/mysqlctl/command/teardown.go b/go/cmd/mysqlctl/command/teardown.go
new file mode 100644
index 00000000000..0d37a15cfdc
--- /dev/null
+++ b/go/cmd/mysqlctl/command/teardown.go
@@ -0,0 +1,70 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/vt/mysqlctl"
+)
+
+var Teardown = &cobra.Command{
+ Use: "teardown",
+ Short: "Shuts mysqld down and removes the directory.",
+ Long: "{{< warning >}}\n" +
+ "This is a destructive operation.\n" +
+ "{{ warning >}}\n\n" +
+
+ "Shuts down a `mysqld` instance and removes its data directory.",
+ Example: `mysqlctl --tablet_uid 101 --alsologtostderr teardown`,
+ Args: cobra.NoArgs,
+ RunE: commandTeardown,
+}
+
+var teardownArgs = struct {
+ WaitTime time.Duration
+ Force bool
+}{
+ WaitTime: 5 * time.Minute,
+}
+
+func commandTeardown(cmd *cobra.Command, args []string) error {
+ // There ought to be an existing my.cnf, so use it to find mysqld.
+ mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID)
+ if err != nil {
+ return fmt.Errorf("failed to find mysql config: %v", err)
+ }
+ defer mysqld.Close()
+
+ ctx, cancel := context.WithTimeout(context.Background(), teardownArgs.WaitTime)
+ defer cancel()
+ if err := mysqld.Teardown(ctx, cnf, teardownArgs.Force); err != nil {
+ return fmt.Errorf("failed teardown mysql (forced? %v): %v", teardownArgs.Force, err)
+ }
+ return nil
+}
+
+func init() {
+ Teardown.Flags().DurationVar(&teardownArgs.WaitTime, "wait_time", teardownArgs.WaitTime, "How long to wait for mysqld shutdown.")
+ Teardown.Flags().BoolVarP(&teardownArgs.Force, "force", "f", teardownArgs.Force, "Remove the root directory even if mysqld shutdown fails.")
+
+ Root.AddCommand(Teardown)
+}
diff --git a/go/cmd/mysqlctl/docgen/main.go b/go/cmd/mysqlctl/docgen/main.go
new file mode 100644
index 00000000000..2162b5e8551
--- /dev/null
+++ b/go/cmd/mysqlctl/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/mysqlctl/command"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+ Use: "docgen [-d ]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(command.Root, dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/mysqlctl/mysqlctl.go b/go/cmd/mysqlctl/mysqlctl.go
index 6873cc2bf56..72198c2c8c0 100644
--- a/go/cmd/mysqlctl/mysqlctl.go
+++ b/go/cmd/mysqlctl/mysqlctl.go
@@ -18,268 +18,12 @@ limitations under the License.
package main
import (
- "context"
- "fmt"
- "os"
- "time"
-
- "github.com/spf13/pflag"
-
- "vitess.io/vitess/go/acl"
- "vitess.io/vitess/go/cmd"
- "vitess.io/vitess/go/exit"
- "vitess.io/vitess/go/flagutil"
- "vitess.io/vitess/go/mysql"
- "vitess.io/vitess/go/vt/dbconfigs"
+ "vitess.io/vitess/go/cmd/mysqlctl/command"
"vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/logutil"
- "vitess.io/vitess/go/vt/mysqlctl"
- "vitess.io/vitess/go/vt/servenv"
-)
-
-var (
- mysqlPort = 3306
- tabletUID = uint32(41983)
- mysqlSocket string
)
-func init() {
- servenv.RegisterDefaultSocketFileFlags()
- servenv.RegisterFlags()
- servenv.RegisterServiceMapFlag()
- // mysqlctl only starts and stops mysql, only needs dba.
- dbconfigs.RegisterFlags(dbconfigs.Dba)
- servenv.OnParse(func(fs *pflag.FlagSet) {
- fs.IntVar(&mysqlPort, "mysql_port", mysqlPort, "MySQL port")
- fs.Uint32Var(&tabletUID, "tablet_uid", tabletUID, "Tablet UID")
- fs.StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "Path to the mysqld socket file")
-
- acl.RegisterFlags(fs)
- })
-}
-
-func initConfigCmd(subFlags *pflag.FlagSet, args []string) error {
- _ = subFlags.Parse(args)
-
- // Generate my.cnf from scratch and use it to find mysqld.
- mysqld, cnf, err := mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort)
- if err != nil {
- return fmt.Errorf("failed to initialize mysql config: %v", err)
- }
- defer mysqld.Close()
- if err := mysqld.InitConfig(cnf); err != nil {
- return fmt.Errorf("failed to init mysql config: %v", err)
- }
- return nil
-}
-
-func initCmd(subFlags *pflag.FlagSet, args []string) error {
- waitTime := subFlags.Duration("wait_time", 5*time.Minute, "How long to wait for mysqld startup")
- initDBSQLFile := subFlags.String("init_db_sql_file", "", "Path to .sql file to run after mysqld initiliaztion")
- _ = subFlags.Parse(args)
-
- // Generate my.cnf from scratch and use it to find mysqld.
- mysqld, cnf, err := mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort)
- if err != nil {
- return fmt.Errorf("failed to initialize mysql config: %v", err)
- }
- defer mysqld.Close()
-
- ctx, cancel := context.WithTimeout(context.Background(), *waitTime)
- defer cancel()
- if err := mysqld.Init(ctx, cnf, *initDBSQLFile); err != nil {
- return fmt.Errorf("failed init mysql: %v", err)
- }
- return nil
-}
-
-func reinitConfigCmd(subFlags *pflag.FlagSet, args []string) error {
- _ = subFlags.Parse(args)
-
- // There ought to be an existing my.cnf, so use it to find mysqld.
- mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID)
- if err != nil {
- return fmt.Errorf("failed to find mysql config: %v", err)
- }
- defer mysqld.Close()
-
- if err := mysqld.ReinitConfig(context.TODO(), cnf); err != nil {
- return fmt.Errorf("failed to reinit mysql config: %v", err)
- }
- return nil
-}
-
-func shutdownCmd(subFlags *pflag.FlagSet, args []string) error {
- waitTime := subFlags.Duration("wait_time", 5*time.Minute, "How long to wait for mysqld shutdown")
- _ = subFlags.Parse(args)
-
- // There ought to be an existing my.cnf, so use it to find mysqld.
- mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID)
- if err != nil {
- return fmt.Errorf("failed to find mysql config: %v", err)
- }
- defer mysqld.Close()
-
- ctx, cancel := context.WithTimeout(context.Background(), *waitTime)
- defer cancel()
- if err := mysqld.Shutdown(ctx, cnf, true); err != nil {
- return fmt.Errorf("failed shutdown mysql: %v", err)
- }
- return nil
-}
-
-func startCmd(subFlags *pflag.FlagSet, args []string) error {
- waitTime := subFlags.Duration("wait_time", 5*time.Minute, "How long to wait for mysqld startup")
- var mysqldArgs flagutil.StringListValue
- subFlags.Var(&mysqldArgs, "mysqld_args", "List of comma-separated flags to pass additionally to mysqld")
- _ = subFlags.Parse(args)
-
- // There ought to be an existing my.cnf, so use it to find mysqld.
- mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID)
- if err != nil {
- return fmt.Errorf("failed to find mysql config: %v", err)
- }
- defer mysqld.Close()
-
- ctx, cancel := context.WithTimeout(context.Background(), *waitTime)
- defer cancel()
- if err := mysqld.Start(ctx, cnf, mysqldArgs...); err != nil {
- return fmt.Errorf("failed start mysql: %v", err)
- }
- return nil
-}
-
-func teardownCmd(subFlags *pflag.FlagSet, args []string) error {
- waitTime := subFlags.Duration("wait_time", 5*time.Minute, "How long to wait for mysqld shutdown")
- force := subFlags.Bool("force", false, "Remove the root directory even if mysqld shutdown fails")
- _ = subFlags.Parse(args)
-
- // There ought to be an existing my.cnf, so use it to find mysqld.
- mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID)
- if err != nil {
- return fmt.Errorf("failed to find mysql config: %v", err)
- }
- defer mysqld.Close()
-
- ctx, cancel := context.WithTimeout(context.Background(), *waitTime)
- defer cancel()
- if err := mysqld.Teardown(ctx, cnf, *force); err != nil {
- return fmt.Errorf("failed teardown mysql (forced? %v): %v", *force, err)
- }
- return nil
-}
-
-func positionCmd(subFlags *pflag.FlagSet, args []string) error {
- _ = subFlags.Parse(args)
- if len(args) < 3 {
- return fmt.Errorf("not enough arguments for position operation")
- }
-
- pos1, err := mysql.DecodePosition(args[1])
- if err != nil {
- return err
- }
-
- switch args[0] {
- case "equal":
- pos2, err := mysql.DecodePosition(args[2])
- if err != nil {
- return err
- }
- fmt.Println(pos1.Equal(pos2))
- case "at_least":
- pos2, err := mysql.DecodePosition(args[2])
- if err != nil {
- return err
- }
- fmt.Println(pos1.AtLeast(pos2))
- case "append":
- gtid, err := mysql.DecodeGTID(args[2])
- if err != nil {
- return err
- }
- fmt.Println(mysql.AppendGTID(pos1, gtid))
- }
-
- return nil
-}
-
-type command struct {
- name string
- method func(*pflag.FlagSet, []string) error
- params string
- help string
-}
-
-var commands = []command{
- {"init", initCmd, "[--wait_time=5m] [--init_db_sql_file=]",
- "Initializes the directory structure and starts mysqld"},
- {"init_config", initConfigCmd, "",
- "Initializes the directory structure, creates my.cnf file, but does not start mysqld"},
- {"reinit_config", reinitConfigCmd, "",
- "Reinitializes my.cnf file with new server_id"},
- {"teardown", teardownCmd, "[--wait_time=5m] [--force]",
- "Shuts mysqld down, and removes the directory"},
- {"start", startCmd, "[--wait_time=5m]",
- "Starts mysqld on an already 'init'-ed directory"},
- {"shutdown", shutdownCmd, "[--wait_time=5m]",
- "Shuts down mysqld, does not remove any file"},
-
- {"position", positionCmd,
- " ",
- "Compute operations on replication positions"},
-}
-
func main() {
- defer exit.Recover()
- defer logutil.Flush()
-
- fs := pflag.NewFlagSet("mysqlctl", pflag.ExitOnError)
- log.RegisterFlags(fs)
- logutil.RegisterFlags(fs)
- pflag.Usage = func() {
- w := os.Stderr
- fmt.Fprintf(w, "Usage: %s [global-flags] -- [command-flags]\n", os.Args[0])
- fmt.Fprintf(w, "\nThe commands are listed below. Use '%s -- {-h, --help}' for command help.\n\n", os.Args[0])
- for _, cmd := range commands {
- fmt.Fprintf(w, " %s", cmd.name)
- if cmd.params != "" {
- fmt.Fprintf(w, " %s", cmd.params)
- }
- fmt.Fprintf(w, "\n")
- }
- fmt.Fprintf(w, "\nGlobal flags:\n")
- pflag.PrintDefaults()
- }
- args := servenv.ParseFlagsWithArgs("mysqlctl")
-
- if cmd.IsRunningAsRoot() {
- fmt.Fprintln(os.Stderr, "mysqlctl cannot be ran as root. Please run as a different user")
- exit.Return(1)
- }
-
- action := args[0]
- for _, cmd := range commands {
- if cmd.name == action {
- subFlags := pflag.NewFlagSet(action, pflag.ExitOnError)
- subFlags.Usage = func() {
- w := os.Stderr
- fmt.Fprintf(w, "Usage: %s %s %s\n\n", os.Args[0], cmd.name, cmd.params)
- fmt.Fprintf(w, cmd.help)
- fmt.Fprintf(w, "\n\n")
- subFlags.PrintDefaults()
- }
- // This is logged and we want sentence capitalization and punctuation.
- pflag.ErrHelp = fmt.Errorf("\nSee %s --help for more information.", os.Args[0]) // nolint:revive
- if err := cmd.method(subFlags, args[1:]); err != nil {
- log.Errorf("%v\n", err)
- subFlags.Usage()
- exit.Return(1)
- }
- return
- }
+ if err := command.Root.Execute(); err != nil {
+ log.Exit(err)
}
- log.Errorf("invalid action: %v\n\n", action)
- pflag.Usage()
- exit.Return(1)
}
diff --git a/go/cmd/mysqlctld/cli/mysqlctld.go b/go/cmd/mysqlctld/cli/mysqlctld.go
new file mode 100644
index 00000000000..6ebaa5dc422
--- /dev/null
+++ b/go/cmd/mysqlctld/cli/mysqlctld.go
@@ -0,0 +1,178 @@
+/*
+Copyright 2019 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// mysqlctld is a daemon that starts or initializes mysqld and provides an RPC
+// interface for vttablet to stop and start mysqld from a different container
+// without having to restart the container running mysqlctld.
+package cli
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/vt/dbconfigs"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/mysqlctl"
+ "vitess.io/vitess/go/vt/servenv"
+)
+
+var (
+ // mysqld is used by the rpc implementation plugin.
+ mysqld *mysqlctl.Mysqld
+ cnf *mysqlctl.Mycnf
+
+ mysqlPort = 3306
+ tabletUID = uint32(41983)
+ mysqlSocket string
+
+ // mysqlctl init flags
+ waitTime = 5 * time.Minute
+ initDBSQLFile string
+
+ Main = &cobra.Command{
+ Use: "mysqlctld",
+ Short: "mysqlctld is a daemon that starts or initializes mysqld.",
+ Long: "`mysqlctld` is a gRPC server that can be used instead of the `mysqlctl` client tool.\n" +
+ "If the target directories are empty when it is invoked, it automatically performs initialization operations to bootstrap the `mysqld` instance before starting it.\n" +
+ "The `mysqlctld` process can subsequently receive gRPC commands from a `vttablet` to perform housekeeping operations like shutting down and restarting the `mysqld` instance as needed.\n\n" +
+ "{{< warning >}}\n" +
+ "`mysqld_safe` is not used so the `mysqld` process will not be automatically restarted in case of a failure.\n" +
+ "{{ warning>}}\n\n" +
+ "To enable communication with a `vttablet`, the server must be configured to receive gRPC messages on a unix domain socket.",
+ Example: `mysqlctld \
+ --log_dir=${VTDATAROOT}/logs \
+ --tablet_uid=100 \
+ --mysql_port=17100 \
+ --socket_file=/path/to/socket_file`,
+ Args: cobra.NoArgs,
+ PreRunE: servenv.CobraPreRunE,
+ RunE: run,
+ }
+)
+
+func init() {
+ servenv.RegisterDefaultFlags()
+ servenv.RegisterDefaultSocketFileFlags()
+ servenv.RegisterFlags()
+ servenv.RegisterGRPCServerFlags()
+ servenv.RegisterGRPCServerAuthFlags()
+ servenv.RegisterServiceMapFlag()
+ // mysqlctld only starts and stops mysql, only needs dba.
+ dbconfigs.RegisterFlags(dbconfigs.Dba)
+
+ servenv.MoveFlagsToCobraCommand(Main)
+
+ Main.Flags().IntVar(&mysqlPort, "mysql_port", mysqlPort, "MySQL port")
+ Main.Flags().Uint32Var(&tabletUID, "tablet_uid", tabletUID, "Tablet UID")
+ Main.Flags().StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "Path to the mysqld socket file")
+ Main.Flags().DurationVar(&waitTime, "wait_time", waitTime, "How long to wait for mysqld startup or shutdown")
+ Main.Flags().StringVar(&initDBSQLFile, "init_db_sql_file", initDBSQLFile, "Path to .sql file to run after mysqld initialization")
+
+ acl.RegisterFlags(Main.Flags())
+}
+
+func run(cmd *cobra.Command, args []string) error {
+ defer logutil.Flush()
+
+ // We'll register this OnTerm handler before mysqld starts, so we get notified
+ // if mysqld dies on its own without us (or our RPC client) telling it to.
+ mysqldTerminated := make(chan struct{})
+ onTermFunc := func() {
+ close(mysqldTerminated)
+ }
+
+ // Start or Init mysqld as needed.
+ ctx, cancel := context.WithTimeout(context.Background(), waitTime)
+ mycnfFile := mysqlctl.MycnfFile(tabletUID)
+ if _, statErr := os.Stat(mycnfFile); os.IsNotExist(statErr) {
+ // Generate my.cnf from scratch and use it to find mysqld.
+ log.Infof("mycnf file (%s) doesn't exist, initializing", mycnfFile)
+
+ var err error
+ mysqld, cnf, err = mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort)
+ if err != nil {
+ cancel()
+ return fmt.Errorf("failed to initialize mysql config: %w", err)
+ }
+ mysqld.OnTerm(onTermFunc)
+
+ if err := mysqld.Init(ctx, cnf, initDBSQLFile); err != nil {
+ cancel()
+ return fmt.Errorf("failed to initialize mysql data dir and start mysqld: %w", err)
+ }
+ } else {
+ // There ought to be an existing my.cnf, so use it to find mysqld.
+ log.Infof("mycnf file (%s) already exists, starting without init", mycnfFile)
+
+ var err error
+ mysqld, cnf, err = mysqlctl.OpenMysqldAndMycnf(tabletUID)
+ if err != nil {
+ cancel()
+ return fmt.Errorf("failed to find mysql config: %w", err)
+ }
+ mysqld.OnTerm(onTermFunc)
+
+ err = mysqld.RefreshConfig(ctx, cnf)
+ if err != nil {
+ cancel()
+ return fmt.Errorf("failed to refresh config: %w", err)
+ }
+
+ // check if we were interrupted during a previous restore
+ if !mysqlctl.RestoreWasInterrupted(cnf) {
+ if err := mysqld.Start(ctx, cnf); err != nil {
+ cancel()
+ return fmt.Errorf("failed to start mysqld: %w", err)
+ }
+ } else {
+ log.Infof("found interrupted restore, not starting mysqld")
+ }
+ }
+ cancel()
+
+ servenv.Init()
+
+ // Take mysqld down with us on SIGTERM before entering lame duck.
+ servenv.OnTermSync(func() {
+ log.Infof("mysqlctl received SIGTERM, shutting down mysqld first")
+ ctx := context.Background()
+ if err := mysqld.Shutdown(ctx, cnf, true); err != nil {
+ log.Errorf("failed to shutdown mysqld: %v", err)
+ }
+ })
+
+ // Start RPC server and wait for SIGTERM.
+ mysqlctldTerminated := make(chan struct{})
+ go func() {
+ servenv.RunDefault()
+ close(mysqlctldTerminated)
+ }()
+
+ select {
+ case <-mysqldTerminated:
+ log.Infof("mysqld shut down on its own, exiting mysqlctld")
+ case <-mysqlctldTerminated:
+ log.Infof("mysqlctld shut down gracefully")
+ }
+
+ return nil
+}
diff --git a/go/cmd/mysqlctld/plugin_grpcmysqlctlserver.go b/go/cmd/mysqlctld/cli/plugin_grpcmysqlctlserver.go
similarity index 98%
rename from go/cmd/mysqlctld/plugin_grpcmysqlctlserver.go
rename to go/cmd/mysqlctld/cli/plugin_grpcmysqlctlserver.go
index ee81ab77515..1186d5ed788 100644
--- a/go/cmd/mysqlctld/plugin_grpcmysqlctlserver.go
+++ b/go/cmd/mysqlctld/cli/plugin_grpcmysqlctlserver.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Import and register the gRPC mysqlctl server
diff --git a/go/cmd/mysqlctld/plugin_prometheusbackend.go b/go/cmd/mysqlctld/cli/plugin_prometheusbackend.go
similarity index 98%
rename from go/cmd/mysqlctld/plugin_prometheusbackend.go
rename to go/cmd/mysqlctld/cli/plugin_prometheusbackend.go
index 4ae114ceedd..e01ecf0bead 100644
--- a/go/cmd/mysqlctld/plugin_prometheusbackend.go
+++ b/go/cmd/mysqlctld/cli/plugin_prometheusbackend.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports Prometheus to allow for instrumentation
// with the Prometheus client library
diff --git a/go/cmd/mysqlctld/docgen/main.go b/go/cmd/mysqlctld/docgen/main.go
new file mode 100644
index 00000000000..4c920fa46e0
--- /dev/null
+++ b/go/cmd/mysqlctld/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/mysqlctld/cli"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+ Use: "docgen [-d ]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(cli.Main, dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/mysqlctld/mysqlctld.go b/go/cmd/mysqlctld/mysqlctld.go
index 39b9ac11490..5843c5a15e1 100644
--- a/go/cmd/mysqlctld/mysqlctld.go
+++ b/go/cmd/mysqlctld/mysqlctld.go
@@ -20,140 +20,12 @@ limitations under the License.
package main
import (
- "context"
- "os"
- "time"
-
- "github.com/spf13/pflag"
-
- "vitess.io/vitess/go/acl"
- "vitess.io/vitess/go/exit"
- "vitess.io/vitess/go/vt/dbconfigs"
+ "vitess.io/vitess/go/cmd/mysqlctld/cli"
"vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/logutil"
- "vitess.io/vitess/go/vt/mysqlctl"
- "vitess.io/vitess/go/vt/servenv"
-)
-
-var (
- // mysqld is used by the rpc implementation plugin.
- mysqld *mysqlctl.Mysqld
- cnf *mysqlctl.Mycnf
-
- mysqlPort = 3306
- tabletUID = uint32(41983)
- mysqlSocket string
-
- // mysqlctl init flags
- waitTime = 5 * time.Minute
- initDBSQLFile string
)
-func init() {
- servenv.RegisterDefaultFlags()
- servenv.RegisterDefaultSocketFileFlags()
- servenv.RegisterFlags()
- servenv.RegisterGRPCServerFlags()
- servenv.RegisterGRPCServerAuthFlags()
- servenv.RegisterServiceMapFlag()
- // mysqlctld only starts and stops mysql, only needs dba.
- dbconfigs.RegisterFlags(dbconfigs.Dba)
- servenv.OnParse(func(fs *pflag.FlagSet) {
- fs.IntVar(&mysqlPort, "mysql_port", mysqlPort, "MySQL port")
- fs.Uint32Var(&tabletUID, "tablet_uid", tabletUID, "Tablet UID")
- fs.StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "Path to the mysqld socket file")
- fs.DurationVar(&waitTime, "wait_time", waitTime, "How long to wait for mysqld startup or shutdown")
- fs.StringVar(&initDBSQLFile, "init_db_sql_file", initDBSQLFile, "Path to .sql file to run after mysqld initialization")
-
- acl.RegisterFlags(fs)
- })
-}
-
func main() {
- defer exit.Recover()
- defer logutil.Flush()
-
- servenv.ParseFlags("mysqlctld")
-
- // We'll register this OnTerm handler before mysqld starts, so we get notified
- // if mysqld dies on its own without us (or our RPC client) telling it to.
- mysqldTerminated := make(chan struct{})
- onTermFunc := func() {
- close(mysqldTerminated)
- }
-
- // Start or Init mysqld as needed.
- ctx, cancel := context.WithTimeout(context.Background(), waitTime)
- mycnfFile := mysqlctl.MycnfFile(tabletUID)
- if _, statErr := os.Stat(mycnfFile); os.IsNotExist(statErr) {
- // Generate my.cnf from scratch and use it to find mysqld.
- log.Infof("mycnf file (%s) doesn't exist, initializing", mycnfFile)
-
- var err error
- mysqld, cnf, err = mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort)
- if err != nil {
- log.Errorf("failed to initialize mysql config: %v", err)
- exit.Return(1)
- }
- mysqld.OnTerm(onTermFunc)
-
- if err := mysqld.Init(ctx, cnf, initDBSQLFile); err != nil {
- log.Errorf("failed to initialize mysql data dir and start mysqld: %v", err)
- exit.Return(1)
- }
- } else {
- // There ought to be an existing my.cnf, so use it to find mysqld.
- log.Infof("mycnf file (%s) already exists, starting without init", mycnfFile)
-
- var err error
- mysqld, cnf, err = mysqlctl.OpenMysqldAndMycnf(tabletUID)
- if err != nil {
- log.Errorf("failed to find mysql config: %v", err)
- exit.Return(1)
- }
- mysqld.OnTerm(onTermFunc)
-
- err = mysqld.RefreshConfig(ctx, cnf)
- if err != nil {
- log.Errorf("failed to refresh config: %v", err)
- exit.Return(1)
- }
-
- // check if we were interrupted during a previous restore
- if !mysqlctl.RestoreWasInterrupted(cnf) {
- if err := mysqld.Start(ctx, cnf); err != nil {
- log.Errorf("failed to start mysqld: %v", err)
- exit.Return(1)
- }
- } else {
- log.Infof("found interrupted restore, not starting mysqld")
- }
- }
- cancel()
-
- servenv.Init()
- defer servenv.Close()
-
- // Take mysqld down with us on SIGTERM before entering lame duck.
- servenv.OnTermSync(func() {
- log.Infof("mysqlctl received SIGTERM, shutting down mysqld first")
- ctx := context.Background()
- if err := mysqld.Shutdown(ctx, cnf, true); err != nil {
- log.Errorf("failed to shutdown mysqld: %v", err)
- }
- })
-
- // Start RPC server and wait for SIGTERM.
- mysqlctldTerminated := make(chan struct{})
- go func() {
- servenv.RunDefault()
- close(mysqlctldTerminated)
- }()
-
- select {
- case <-mysqldTerminated:
- log.Infof("mysqld shut down on its own, exiting mysqlctld")
- case <-mysqlctldTerminated:
- log.Infof("mysqlctld shut down gracefully")
+ if err := cli.Main.Execute(); err != nil {
+ log.Exit(err)
}
}
diff --git a/go/cmd/query_analyzer/query_analyzer.go b/go/cmd/query_analyzer/query_analyzer.go
deleted file mode 100644
index 2138bde2673..00000000000
--- a/go/cmd/query_analyzer/query_analyzer.go
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
-Copyright 2019 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "io"
- "os"
- "sort"
-
- "github.com/spf13/pflag"
-
- "vitess.io/vitess/go/acl"
- "vitess.io/vitess/go/exit"
- "vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/logutil"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/sqlparser"
-
- // Include deprecation warnings for soon-to-be-unsupported flag invocations.
- _flag "vitess.io/vitess/go/internal/flag"
-)
-
-var (
- ignores = [][]byte{
- []byte("#"),
- []byte("/*"),
- []byte("SET"),
- []byte("use"),
- []byte("BEGIN"),
- []byte("COMMIT"),
- []byte("ROLLBACK"),
- }
- bindIndex = 0
- queries = make(map[string]int)
-)
-
-type stat struct {
- Query string
- Count int
-}
-
-type stats []stat
-
-func (a stats) Len() int { return len(a) }
-func (a stats) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a stats) Less(i, j int) bool { return a[i].Count > a[j].Count }
-
-func main() {
- defer exit.Recover()
- fs := pflag.NewFlagSet("query_analyzer", pflag.ExitOnError)
- log.RegisterFlags(fs)
- logutil.RegisterFlags(fs)
- acl.RegisterFlags(fs)
- servenv.RegisterMySQLServerFlags(fs)
- _flag.Parse(fs)
- logutil.PurgeLogs()
- for _, filename := range _flag.Args() {
- fmt.Printf("processing: %s\n", filename)
- if err := processFile(filename); err != nil {
- log.Errorf("processFile error: %v", err)
- exit.Return(1)
- }
- }
- var stats = make(stats, 0, 128)
- for k, v := range queries {
- stats = append(stats, stat{Query: k, Count: v})
- }
- sort.Sort(stats)
- for _, s := range stats {
- fmt.Printf("%d: %s\n", s.Count, s.Query)
- }
-}
-
-func processFile(filename string) error {
- f, err := os.Open(filename)
- if err != nil {
- return err
- }
- r := bufio.NewReader(f)
- for {
- line, err := r.ReadBytes('\n')
- if err != nil {
- if err == io.EOF {
- break
- }
- return err
- }
- analyze(line)
- }
- return nil
-}
-
-func analyze(line []byte) {
- for _, ignore := range ignores {
- if bytes.HasPrefix(line, ignore) {
- return
- }
- }
- dml := string(bytes.TrimRight(line, "\n"))
- ast, err := sqlparser.Parse(dml)
- if err != nil {
- log.Errorf("Error parsing %s", dml)
- return
- }
- bindIndex = 0
- buf := sqlparser.NewTrackedBuffer(formatWithBind)
- buf.Myprintf("%v", ast)
- addQuery(buf.ParsedQuery().Query)
-}
-
-func formatWithBind(buf *sqlparser.TrackedBuffer, node sqlparser.SQLNode) {
- v, ok := node.(*sqlparser.Literal)
- if !ok {
- node.Format(buf)
- return
- }
- switch v.Type {
- case sqlparser.StrVal, sqlparser.HexVal, sqlparser.IntVal:
- buf.WriteArg(":", fmt.Sprintf("v%d", bindIndex))
- bindIndex++
- default:
- node.Format(buf)
- }
-}
-
-func addQuery(query string) {
- count, ok := queries[query]
- if !ok {
- count = 0
- }
- queries[query] = count + 1
-}
diff --git a/go/cmd/vtorc/plugin_consultopo.go b/go/cmd/topo2topo/cli/plugin_consultopo.go
similarity index 98%
rename from go/cmd/vtorc/plugin_consultopo.go
rename to go/cmd/topo2topo/cli/plugin_consultopo.go
index 59d6774fdbc..a128f294a42 100644
--- a/go/cmd/vtorc/plugin_consultopo.go
+++ b/go/cmd/topo2topo/cli/plugin_consultopo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports consultopo to register the consul implementation of TopoServer.
diff --git a/go/cmd/vtorc/plugin_etcd2topo.go b/go/cmd/topo2topo/cli/plugin_etcd2topo.go
similarity index 98%
rename from go/cmd/vtorc/plugin_etcd2topo.go
rename to go/cmd/topo2topo/cli/plugin_etcd2topo.go
index d99ef51d4af..5a51923cf00 100644
--- a/go/cmd/vtorc/plugin_etcd2topo.go
+++ b/go/cmd/topo2topo/cli/plugin_etcd2topo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports etcd2topo to register the etcd2 implementation of TopoServer.
diff --git a/go/cmd/topo2topo/plugin_zk2topo.go b/go/cmd/topo2topo/cli/plugin_zk2topo.go
similarity index 98%
rename from go/cmd/topo2topo/plugin_zk2topo.go
rename to go/cmd/topo2topo/cli/plugin_zk2topo.go
index 62dda455df7..66d14988c75 100644
--- a/go/cmd/topo2topo/plugin_zk2topo.go
+++ b/go/cmd/topo2topo/cli/plugin_zk2topo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
// Imports and register the zk2 TopologyServer
diff --git a/go/cmd/topo2topo/cli/topo2topo.go b/go/cmd/topo2topo/cli/topo2topo.go
new file mode 100644
index 00000000000..6e7e173872b
--- /dev/null
+++ b/go/cmd/topo2topo/cli/topo2topo.go
@@ -0,0 +1,158 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/vt/grpccommon"
+ "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/helpers"
+)
+
+var (
+ fromImplementation string
+ fromServerAddress string
+ fromRoot string
+ toImplementation string
+ toServerAddress string
+ toRoot string
+ compare bool
+ doKeyspaces bool
+ doShards bool
+ doShardReplications bool
+ doTablets bool
+ doRoutingRules bool
+
+ Main = &cobra.Command{
+ Use: "topo2topo",
+ Short: "topo2topo copies Vitess topology data from one topo server to another.",
+ Long: `topo2topo copies Vitess topology data from one topo server to another.
+It can also be used to compare data between two topologies.`,
+ Args: cobra.NoArgs,
+ PreRunE: servenv.CobraPreRunE,
+ RunE: run,
+ }
+)
+
+func init() {
+ servenv.MoveFlagsToCobraCommand(Main)
+
+ Main.Flags().StringVar(&fromImplementation, "from_implementation", fromImplementation, "topology implementation to copy data from")
+ Main.Flags().StringVar(&fromServerAddress, "from_server", fromServerAddress, "topology server address to copy data from")
+ Main.Flags().StringVar(&fromRoot, "from_root", fromRoot, "topology server root to copy data from")
+ Main.Flags().StringVar(&toImplementation, "to_implementation", toImplementation, "topology implementation to copy data to")
+ Main.Flags().StringVar(&toServerAddress, "to_server", toServerAddress, "topology server address to copy data to")
+ Main.Flags().StringVar(&toRoot, "to_root", toRoot, "topology server root to copy data to")
+ Main.Flags().BoolVar(&compare, "compare", compare, "compares data between topologies")
+ Main.Flags().BoolVar(&doKeyspaces, "do-keyspaces", doKeyspaces, "copies the keyspace information")
+ Main.Flags().BoolVar(&doShards, "do-shards", doShards, "copies the shard information")
+ Main.Flags().BoolVar(&doShardReplications, "do-shard-replications", doShardReplications, "copies the shard replication information")
+ Main.Flags().BoolVar(&doTablets, "do-tablets", doTablets, "copies the tablet information")
+ Main.Flags().BoolVar(&doRoutingRules, "do-routing-rules", doRoutingRules, "copies the routing rules")
+
+ acl.RegisterFlags(Main.Flags())
+ grpccommon.RegisterFlags(Main.Flags())
+}
+
+func run(cmd *cobra.Command, args []string) error {
+ defer logutil.Flush()
+ servenv.Init()
+
+ fromTS, err := topo.OpenServer(fromImplementation, fromServerAddress, fromRoot)
+ if err != nil {
+ return fmt.Errorf("Cannot open 'from' topo %v: %w", fromImplementation, err)
+ }
+ toTS, err := topo.OpenServer(toImplementation, toServerAddress, toRoot)
+ if err != nil {
+ return fmt.Errorf("Cannot open 'to' topo %v: %w", toImplementation, err)
+ }
+
+ ctx := context.Background()
+
+ if compare {
+ return compareTopos(ctx, fromTS, toTS)
+ }
+
+ return copyTopos(ctx, fromTS, toTS)
+}
+
+func copyTopos(ctx context.Context, fromTS, toTS *topo.Server) error {
+ if doKeyspaces {
+ if err := helpers.CopyKeyspaces(ctx, fromTS, toTS); err != nil {
+ return err
+ }
+ }
+ if doShards {
+ if err := helpers.CopyShards(ctx, fromTS, toTS); err != nil {
+ return err
+ }
+ }
+ if doShardReplications {
+ if err := helpers.CopyShardReplications(ctx, fromTS, toTS); err != nil {
+ return err
+ }
+ }
+ if doTablets {
+ if err := helpers.CopyTablets(ctx, fromTS, toTS); err != nil {
+ return err
+ }
+ }
+ if doRoutingRules {
+ if err := helpers.CopyRoutingRules(ctx, fromTS, toTS); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func compareTopos(ctx context.Context, fromTS, toTS *topo.Server) (err error) {
+ if doKeyspaces {
+ err = helpers.CompareKeyspaces(ctx, fromTS, toTS)
+ if err != nil {
+ return fmt.Errorf("Compare keyspaces failed: %w", err)
+ }
+ }
+ if doShards {
+ err = helpers.CompareShards(ctx, fromTS, toTS)
+ if err != nil {
+ return fmt.Errorf("Compare shards failed: %w", err)
+ }
+ }
+ if doShardReplications {
+ err = helpers.CompareShardReplications(ctx, fromTS, toTS)
+ if err != nil {
+ return fmt.Errorf("Compare shard replications failed: %w", err)
+ }
+ }
+ if doTablets {
+ err = helpers.CompareTablets(ctx, fromTS, toTS)
+ if err != nil {
+ return fmt.Errorf("Compare tablets failed: %w", err)
+ }
+ }
+
+ fmt.Println("Topologies are in sync")
+ return nil
+}
diff --git a/go/cmd/topo2topo/docgen/main.go b/go/cmd/topo2topo/docgen/main.go
new file mode 100644
index 00000000000..c1d29fff086
--- /dev/null
+++ b/go/cmd/topo2topo/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/topo2topo/cli"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+ Use: "docgen [-d ]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(cli.Main, dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/topo2topo/plugin_kubernetestopo.go b/go/cmd/topo2topo/plugin_kubernetestopo.go
deleted file mode 100644
index 671d0c8321f..00000000000
--- a/go/cmd/topo2topo/plugin_kubernetestopo.go
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
-Copyright 2020 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-// This plugin imports k8stopo to register the kubernetes implementation of TopoServer.
-
-import (
- _ "vitess.io/vitess/go/vt/topo/k8stopo"
-)
diff --git a/go/cmd/topo2topo/topo2topo.go b/go/cmd/topo2topo/topo2topo.go
index 157960548b8..c1276ebf504 100644
--- a/go/cmd/topo2topo/topo2topo.go
+++ b/go/cmd/topo2topo/topo2topo.go
@@ -17,132 +17,15 @@ limitations under the License.
package main
import (
- "context"
- "fmt"
- "os"
-
- "github.com/spf13/pflag"
-
- "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/cmd/topo2topo/cli"
"vitess.io/vitess/go/exit"
- "vitess.io/vitess/go/vt/grpccommon"
"vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/logutil"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/topo"
- "vitess.io/vitess/go/vt/topo/helpers"
-)
-
-var (
- fromImplementation string
- fromServerAddress string
- fromRoot string
- toImplementation string
- toServerAddress string
- toRoot string
- compare bool
- doKeyspaces bool
- doShards bool
- doShardReplications bool
- doTablets bool
- doRoutingRules bool
)
-func init() {
- servenv.OnParse(func(fs *pflag.FlagSet) {
- fs.StringVar(&fromImplementation, "from_implementation", fromImplementation, "topology implementation to copy data from")
- fs.StringVar(&fromServerAddress, "from_server", fromServerAddress, "topology server address to copy data from")
- fs.StringVar(&fromRoot, "from_root", fromRoot, "topology server root to copy data from")
- fs.StringVar(&toImplementation, "to_implementation", toImplementation, "topology implementation to copy data to")
- fs.StringVar(&toServerAddress, "to_server", toServerAddress, "topology server address to copy data to")
- fs.StringVar(&toRoot, "to_root", toRoot, "topology server root to copy data to")
- fs.BoolVar(&compare, "compare", compare, "compares data between topologies")
- fs.BoolVar(&doKeyspaces, "do-keyspaces", doKeyspaces, "copies the keyspace information")
- fs.BoolVar(&doShards, "do-shards", doShards, "copies the shard information")
- fs.BoolVar(&doShardReplications, "do-shard-replications", doShardReplications, "copies the shard replication information")
- fs.BoolVar(&doTablets, "do-tablets", doTablets, "copies the tablet information")
- fs.BoolVar(&doRoutingRules, "do-routing-rules", doRoutingRules, "copies the routing rules")
-
- acl.RegisterFlags(fs)
- })
-}
-
func main() {
defer exit.RecoverAll()
- defer logutil.Flush()
- fs := pflag.NewFlagSet("topo2topo", pflag.ExitOnError)
- grpccommon.RegisterFlags(fs)
- log.RegisterFlags(fs)
- logutil.RegisterFlags(fs)
-
- servenv.ParseFlags("topo2topo")
- servenv.Init()
-
- fromTS, err := topo.OpenServer(fromImplementation, fromServerAddress, fromRoot)
- if err != nil {
- log.Exitf("Cannot open 'from' topo %v: %v", fromImplementation, err)
- }
- toTS, err := topo.OpenServer(toImplementation, toServerAddress, toRoot)
- if err != nil {
- log.Exitf("Cannot open 'to' topo %v: %v", toImplementation, err)
- }
-
- ctx := context.Background()
-
- if compare {
- compareTopos(ctx, fromTS, toTS)
- return
- }
- copyTopos(ctx, fromTS, toTS)
-}
-
-func copyTopos(ctx context.Context, fromTS, toTS *topo.Server) {
- if doKeyspaces {
- helpers.CopyKeyspaces(ctx, fromTS, toTS)
- }
- if doShards {
- helpers.CopyShards(ctx, fromTS, toTS)
- }
- if doShardReplications {
- helpers.CopyShardReplications(ctx, fromTS, toTS)
- }
- if doTablets {
- helpers.CopyTablets(ctx, fromTS, toTS)
- }
- if doRoutingRules {
- helpers.CopyRoutingRules(ctx, fromTS, toTS)
- }
-}
-
-func compareTopos(ctx context.Context, fromTS, toTS *topo.Server) {
- var err error
- if doKeyspaces {
- err = helpers.CompareKeyspaces(ctx, fromTS, toTS)
- if err != nil {
- log.Exitf("Compare keyspaces failed: %v", err)
- }
- }
- if doShards {
- err = helpers.CompareShards(ctx, fromTS, toTS)
- if err != nil {
- log.Exitf("Compare shards failed: %v", err)
- }
- }
- if doShardReplications {
- err = helpers.CompareShardReplications(ctx, fromTS, toTS)
- if err != nil {
- log.Exitf("Compare shard replications failed: %v", err)
- }
- }
- if doTablets {
- err = helpers.CompareTablets(ctx, fromTS, toTS)
- if err != nil {
- log.Exitf("Compare tablets failed: %v", err)
- }
- }
- if err == nil {
- fmt.Println("Topologies are in sync")
- os.Exit(0)
+ if err := cli.Main.Execute(); err != nil {
+ log.Exitf("%s", err)
}
}
diff --git a/go/cmd/vtaclcheck/cli/vtactlcheck.go b/go/cmd/vtaclcheck/cli/vtactlcheck.go
new file mode 100644
index 00000000000..ebac94131e8
--- /dev/null
+++ b/go/cmd/vtaclcheck/cli/vtactlcheck.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2023 The Vitess Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/vtaclcheck"
+)
+
+var (
+ aclFile string
+ staticAuthFile string
+
+ Main = &cobra.Command{
+ Use: "vtaclcheck",
+ Short: "vtaclcheck checks that the access-control list (ACL) rules in a given file are valid.",
+ Args: cobra.NoArgs,
+ Version: servenv.AppVersion.String(),
+ PreRunE: servenv.CobraPreRunE,
+ PostRun: func(cmd *cobra.Command, args []string) {
+ logutil.Flush()
+ },
+ RunE: run,
+ }
+)
+
+func run(cmd *cobra.Command, args []string) error {
+ servenv.Init()
+
+ opts := &vtaclcheck.Options{
+ ACLFile: aclFile,
+ StaticAuthFile: staticAuthFile,
+ }
+
+ if err := vtaclcheck.Init(opts); err != nil {
+ return err
+ }
+
+ return vtaclcheck.Run()
+}
+
+func init() {
+ servenv.MoveFlagsToCobraCommand(Main)
+
+ Main.Flags().StringVar(&aclFile, "acl-file", aclFile, "The path of the JSON ACL file to check")
+ Main.Flags().StringVar(&staticAuthFile, "static-auth-file", staticAuthFile, "The path of the auth_server_static JSON file to check")
+
+ acl.RegisterFlags(Main.Flags())
+}
diff --git a/go/cmd/vtaclcheck/docgen/main.go b/go/cmd/vtaclcheck/docgen/main.go
new file mode 100644
index 00000000000..d3da8b76179
--- /dev/null
+++ b/go/cmd/vtaclcheck/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/vtaclcheck/cli"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+ Use: "docgen [-d ]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(cli.Main, dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/vtaclcheck/vtaclcheck.go b/go/cmd/vtaclcheck/vtaclcheck.go
index 8b916a8cc0c..bec4cf95fe9 100644
--- a/go/cmd/vtaclcheck/vtaclcheck.go
+++ b/go/cmd/vtaclcheck/vtaclcheck.go
@@ -19,52 +19,21 @@ package main
import (
"fmt"
- "github.com/spf13/pflag"
-
- "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/cmd/vtaclcheck/cli"
"vitess.io/vitess/go/exit"
"vitess.io/vitess/go/vt/logutil"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/vtaclcheck"
)
-var aclFile, staticAuthFile string
-
func init() {
logger := logutil.NewConsoleLogger()
- servenv.OnParse(func(fs *pflag.FlagSet) {
- fs.StringVar(&aclFile, "acl-file", aclFile, "The path of the JSON ACL file to check")
- fs.StringVar(&staticAuthFile, "static-auth-file", staticAuthFile, "The path of the auth_server_static JSON file to check")
-
- acl.RegisterFlags(fs)
-
- fs.SetOutput(logutil.NewLoggerWriter(logger))
- })
+ cli.Main.SetOutput(logutil.NewLoggerWriter(logger))
}
func main() {
defer exit.RecoverAll()
- defer logutil.Flush()
-
- servenv.ParseFlags("vtaclcheck")
- servenv.Init()
- err := run()
- if err != nil {
+ if err := cli.Main.Execute(); err != nil {
fmt.Printf("ERROR: %s\n", err)
exit.Return(1)
}
}
-
-func run() error {
- opts := &vtaclcheck.Options{
- ACLFile: aclFile,
- StaticAuthFile: staticAuthFile,
- }
-
- if err := vtaclcheck.Init(opts); err != nil {
- return err
- }
-
- return vtaclcheck.Run()
-}
diff --git a/go/cmd/vtctld/plugin_azblobbackupstorage.go b/go/cmd/vtbackup/cli/plugin_azblobbackupstorage.go
similarity index 97%
rename from go/cmd/vtctld/plugin_azblobbackupstorage.go
rename to go/cmd/vtbackup/cli/plugin_azblobbackupstorage.go
index a4ca64096a9..bdadc894aae 100644
--- a/go/cmd/vtctld/plugin_azblobbackupstorage.go
+++ b/go/cmd/vtbackup/cli/plugin_azblobbackupstorage.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/mysqlctl/azblobbackupstorage"
diff --git a/go/cmd/vtbackup/plugin_cephbackupstorage.go b/go/cmd/vtbackup/cli/plugin_cephbackupstorage.go
similarity index 97%
rename from go/cmd/vtbackup/plugin_cephbackupstorage.go
rename to go/cmd/vtbackup/cli/plugin_cephbackupstorage.go
index 819cb108126..2f5a825f270 100644
--- a/go/cmd/vtbackup/plugin_cephbackupstorage.go
+++ b/go/cmd/vtbackup/cli/plugin_cephbackupstorage.go
@@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/mysqlctl/cephbackupstorage"
diff --git a/go/cmd/vtbackup/plugin_consultopo.go b/go/cmd/vtbackup/cli/plugin_consultopo.go
similarity index 97%
rename from go/cmd/vtbackup/plugin_consultopo.go
rename to go/cmd/vtbackup/cli/plugin_consultopo.go
index 2b6f10e2b28..c2f8de3339e 100644
--- a/go/cmd/vtbackup/plugin_consultopo.go
+++ b/go/cmd/vtbackup/cli/plugin_consultopo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/topo/consultopo"
diff --git a/go/cmd/vtbackup/plugin_etcd2topo.go b/go/cmd/vtbackup/cli/plugin_etcd2topo.go
similarity index 97%
rename from go/cmd/vtbackup/plugin_etcd2topo.go
rename to go/cmd/vtbackup/cli/plugin_etcd2topo.go
index 97412e65755..e4d6d4129ff 100644
--- a/go/cmd/vtbackup/plugin_etcd2topo.go
+++ b/go/cmd/vtbackup/cli/plugin_etcd2topo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/topo/etcd2topo"
diff --git a/go/cmd/vtbackup/plugin_filebackupstorage.go b/go/cmd/vtbackup/cli/plugin_filebackupstorage.go
similarity index 97%
rename from go/cmd/vtbackup/plugin_filebackupstorage.go
rename to go/cmd/vtbackup/cli/plugin_filebackupstorage.go
index 31417781026..68bf790c827 100644
--- a/go/cmd/vtbackup/plugin_filebackupstorage.go
+++ b/go/cmd/vtbackup/cli/plugin_filebackupstorage.go
@@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage"
diff --git a/go/cmd/vtbackup/plugin_gcsbackupstorage.go b/go/cmd/vtbackup/cli/plugin_gcsbackupstorage.go
similarity index 97%
rename from go/cmd/vtbackup/plugin_gcsbackupstorage.go
rename to go/cmd/vtbackup/cli/plugin_gcsbackupstorage.go
index 2319d0aa7fe..eff9339a318 100644
--- a/go/cmd/vtbackup/plugin_gcsbackupstorage.go
+++ b/go/cmd/vtbackup/cli/plugin_gcsbackupstorage.go
@@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/mysqlctl/gcsbackupstorage"
diff --git a/go/vt/vtgr/controller/controller.go b/go/cmd/vtbackup/cli/plugin_opentsdb.go
similarity index 74%
rename from go/vt/vtgr/controller/controller.go
rename to go/cmd/vtbackup/cli/plugin_opentsdb.go
index 2b2c36cd320..597e426cc09 100644
--- a/go/vt/vtgr/controller/controller.go
+++ b/go/cmd/vtbackup/cli/plugin_opentsdb.go
@@ -1,5 +1,5 @@
/*
-Copyright 2021 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,13 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package controller
+package cli
-import (
- "math/rand"
- "time"
-)
+import "vitess.io/vitess/go/stats/opentsdb"
+
+// This plugin imports opentsdb to register the opentsdb stats backend.
func init() {
- rand.Seed(time.Now().UnixNano())
+ opentsdb.Init("vtbackup")
}
diff --git a/go/cmd/vtbackup/plugin_prometheusbackend.go b/go/cmd/vtbackup/cli/plugin_prometheusbackend.go
similarity index 98%
rename from go/cmd/vtbackup/plugin_prometheusbackend.go
rename to go/cmd/vtbackup/cli/plugin_prometheusbackend.go
index de4ecbb5e9f..3cf256e76c1 100644
--- a/go/cmd/vtbackup/plugin_prometheusbackend.go
+++ b/go/cmd/vtbackup/cli/plugin_prometheusbackend.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports Prometheus to allow for instrumentation
// with the Prometheus client library
diff --git a/go/cmd/vtbackup/plugin_s3backupstorage.go b/go/cmd/vtbackup/cli/plugin_s3backupstorage.go
similarity index 97%
rename from go/cmd/vtbackup/plugin_s3backupstorage.go
rename to go/cmd/vtbackup/cli/plugin_s3backupstorage.go
index 917352f2469..27b4ef06dee 100644
--- a/go/cmd/vtbackup/plugin_s3backupstorage.go
+++ b/go/cmd/vtbackup/cli/plugin_s3backupstorage.go
@@ -13,7 +13,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/mysqlctl/s3backupstorage"
diff --git a/go/cmd/vtbackup/plugin_zk2topo.go b/go/cmd/vtbackup/cli/plugin_zk2topo.go
similarity index 97%
rename from go/cmd/vtbackup/plugin_zk2topo.go
rename to go/cmd/vtbackup/cli/plugin_zk2topo.go
index 5819d2d39ed..914a9b924f9 100644
--- a/go/cmd/vtbackup/plugin_zk2topo.go
+++ b/go/cmd/vtbackup/cli/plugin_zk2topo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/topo/zk2topo"
diff --git a/go/cmd/vtbackup/cli/vtbackup.go b/go/cmd/vtbackup/cli/vtbackup.go
new file mode 100644
index 00000000000..121ba39b8c5
--- /dev/null
+++ b/go/cmd/vtbackup/cli/vtbackup.go
@@ -0,0 +1,875 @@
+/*
+Copyright 2023 The Vitess Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import (
+ "context"
+ "crypto/rand"
+ "fmt"
+ "math"
+ "math/big"
+ "os"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/mysql/replication"
+
+ "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/cmd"
+ "vitess.io/vitess/go/exit"
+ "vitess.io/vitess/go/stats"
+ "vitess.io/vitess/go/vt/dbconfigs"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/mysqlctl"
+ "vitess.io/vitess/go/vt/mysqlctl/backupstats"
+ "vitess.io/vitess/go/vt/mysqlctl/backupstorage"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/topoproto"
+ "vitess.io/vitess/go/vt/vterrors"
+ _ "vitess.io/vitess/go/vt/vttablet/grpctmclient"
+ "vitess.io/vitess/go/vt/vttablet/tmclient"
+)
+
+const (
+ // operationTimeout is the timeout for individual operations like fetching
+ // the primary position. This does not impose an overall timeout on
+ // long-running processes like taking the backup. It only applies to
+ // steps along the way that should complete quickly. This ensures we don't
+ // place a hard cap on the overall time for a backup, while also not waiting
+ // forever for things that should be quick.
+ operationTimeout = 1 * time.Minute
+
+ phaseNameCatchupReplication = "CatchupReplication"
+ phaseNameInitialBackup = "InitialBackup"
+ phaseNameRestoreLastBackup = "RestoreLastBackup"
+ phaseNameTakeNewBackup = "TakeNewBackup"
+ phaseStatusCatchupReplicationStalled = "Stalled"
+ phaseStatusCatchupReplicationStopped = "Stopped"
+)
+
+var (
+ minBackupInterval time.Duration
+ minRetentionTime time.Duration
+ minRetentionCount = 1
+ initialBackup bool
+ allowFirstBackup bool
+ restartBeforeBackup bool
+ upgradeSafe bool
+
+ // vttablet-like flags
+ initDbNameOverride string
+ initKeyspace string
+ initShard string
+ concurrency = 4
+ incrementalFromPos string
+
+ // mysqlctld-like flags
+ mysqlPort = 3306
+ mysqlSocket string
+ mysqlTimeout = 5 * time.Minute
+ initDBSQLFile string
+ detachedMode bool
+ keepAliveTimeout time.Duration
+ disableRedoLog bool
+
+ // Deprecated, use "Phase" instead.
+ deprecatedDurationByPhase = stats.NewGaugesWithSingleLabel(
+ "DurationByPhaseSeconds",
+ "[DEPRECATED] How long it took vtbackup to perform each phase (in seconds).",
+ "phase",
+ )
+
+ // This gauge is updated 3*N times during the course of a vtbackup run,
+ // where N is the number of different phases vtbackup transitions through.
+ // Once to initialize to 0, another time to set the phase to active (1),
+ // and another to deactivate the phase (back to 0).
+ //
+ // At most a single phase is active at a given time.
+ //
+ // The sync gauge immediately reports changes to push-backed backends.
+ // The benefit of the sync gauge is that it makes verifying stats in
+ // integration tests a lot more tractable.
+ phase = stats.NewSyncGaugesWithSingleLabel(
+ "Phase",
+ "Active phase.",
+ "phase",
+ )
+ phaseNames = []string{
+ phaseNameCatchupReplication,
+ phaseNameInitialBackup,
+ phaseNameRestoreLastBackup,
+ phaseNameTakeNewBackup,
+ }
+ phaseStatus = stats.NewGaugesWithMultiLabels(
+ "PhaseStatus",
+ "Internal state of vtbackup phase.",
+ []string{"phase", "status"},
+ )
+ phaseStatuses = map[string][]string{
+ phaseNameCatchupReplication: {
+ phaseStatusCatchupReplicationStalled,
+ phaseStatusCatchupReplicationStopped,
+ },
+ }
+
+ Main = &cobra.Command{
+ Use: "vtbackup",
+ Short: "vtbackup is a batch command to perform a single pass of backup maintenance for a shard.",
+ Long: `vtbackup is a batch command to perform a single pass of backup maintenance for a shard.
+
+When run periodically for each shard, vtbackup can ensure these configurable policies:
+ * There is always a recent backup for the shard.
+ * Old backups for the shard are removed.
+
+Whatever system launches vtbackup is responsible for the following:
+ - Running vtbackup with similar flags that would be used for a vttablet and
+ mysqlctld in the target shard to be backed up.
+ - Provisioning as much disk space for vtbackup as would be given to vttablet.
+ The data directory MUST be empty at startup. Do NOT reuse a persistent disk.
+ - Running vtbackup periodically for each shard, for each backup storage location.
+ - Ensuring that at most one instance runs at a time for a given pair of shard
+ and backup storage location.
+ - Retrying vtbackup if it fails.
+ - Alerting human operators if the failure is persistent.
+
+The process vtbackup follows to take a new backup has the following steps:
+ 1. Restore from the most recent backup.
+ 2. Start a mysqld instance (but no vttablet) from the restored data.
+ 3. Instruct mysqld to connect to the current shard primary and replicate any
+ transactions that are new since the last backup.
+ 4. Ask the primary for its current replication position and set that as the goal
+ for catching up on replication before taking the backup, so the goalposts
+ don't move.
+ 5. Wait until replication is caught up to the goal position or beyond.
+ 6. Stop mysqld and take a new backup.
+
+Aside from additional replication load while vtbackup's mysqld catches up on
+new transactions, the shard should be otherwise unaffected. Existing tablets
+will continue to serve, and no new tablets will appear in topology, meaning no
+query traffic will ever be routed to vtbackup's mysqld. This silent operation
+mode helps make backups minimally disruptive to serving capacity and orthogonal
+to the handling of the query path.
+
+The command-line parameters to vtbackup specify a policy for when a new backup
+is needed, and when old backups should be removed. If the existing backups
+already satisfy the policy, then vtbackup will do nothing and return success
+immediately.`,
+ Version: servenv.AppVersion.String(),
+ Args: cobra.NoArgs,
+ PreRunE: servenv.CobraPreRunE,
+ RunE: run,
+ }
+)
+
+func init() {
+ servenv.RegisterDefaultFlags()
+ dbconfigs.RegisterFlags(dbconfigs.All...)
+ mysqlctl.RegisterFlags()
+
+ servenv.MoveFlagsToCobraCommand(Main)
+
+ Main.Flags().DurationVar(&minBackupInterval, "min_backup_interval", minBackupInterval, "Only take a new backup if it's been at least this long since the most recent backup.")
+ Main.Flags().DurationVar(&minRetentionTime, "min_retention_time", minRetentionTime, "Keep each old backup for at least this long before removing it. Set to 0 to disable pruning of old backups.")
+ Main.Flags().IntVar(&minRetentionCount, "min_retention_count", minRetentionCount, "Always keep at least this many of the most recent backups in this backup storage location, even if some are older than the min_retention_time. This must be at least 1 since a backup must always exist to allow new backups to be made")
+ Main.Flags().BoolVar(&initialBackup, "initial_backup", initialBackup, "Instead of restoring from backup, initialize an empty database with the provided init_db_sql_file and upload a backup of that for the shard, if the shard has no backups yet. This can be used to seed a brand new shard with an initial, empty backup. If any backups already exist for the shard, this will be considered a successful no-op. This can only be done before the shard exists in topology (i.e. before any tablets are deployed).")
+ Main.Flags().BoolVar(&allowFirstBackup, "allow_first_backup", allowFirstBackup, "Allow this job to take the first backup of an existing shard.")
+ Main.Flags().BoolVar(&restartBeforeBackup, "restart_before_backup", restartBeforeBackup, "Perform a mysqld clean/full restart after applying binlogs, but before taking the backup. Only makes sense to work around xtrabackup bugs.")
+ Main.Flags().BoolVar(&upgradeSafe, "upgrade-safe", upgradeSafe, "Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.")
+
+ // vttablet-like flags
+ Main.Flags().StringVar(&initDbNameOverride, "init_db_name_override", initDbNameOverride, "(init parameter) override the name of the db used by vttablet")
+ Main.Flags().StringVar(&initKeyspace, "init_keyspace", initKeyspace, "(init parameter) keyspace to use for this tablet")
+ Main.Flags().StringVar(&initShard, "init_shard", initShard, "(init parameter) shard to use for this tablet")
+ Main.Flags().IntVar(&concurrency, "concurrency", concurrency, "(init restore parameter) how many concurrent files to restore at once")
+ Main.Flags().StringVar(&incrementalFromPos, "incremental_from_pos", incrementalFromPos, "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position")
+
+ // mysqlctld-like flags
+ Main.Flags().IntVar(&mysqlPort, "mysql_port", mysqlPort, "mysql port")
+ Main.Flags().StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "path to the mysql socket")
+ Main.Flags().DurationVar(&mysqlTimeout, "mysql_timeout", mysqlTimeout, "how long to wait for mysqld startup")
+ Main.Flags().StringVar(&initDBSQLFile, "init_db_sql_file", initDBSQLFile, "path to .sql file to run after mysql_install_db")
+ Main.Flags().BoolVar(&detachedMode, "detach", detachedMode, "detached mode - run backups detached from the terminal")
+ Main.Flags().DurationVar(&keepAliveTimeout, "keep-alive-timeout", keepAliveTimeout, "Wait until timeout elapses after a successful backup before shutting down.")
+ Main.Flags().BoolVar(&disableRedoLog, "disable-redo-log", disableRedoLog, "Disable InnoDB redo log during replication-from-primary phase of backup.")
+
+ acl.RegisterFlags(Main.Flags())
+}
+
+func run(_ *cobra.Command, args []string) error {
+ servenv.Init()
+
+ ctx, cancel := context.WithCancel(context.Background())
+ servenv.OnClose(func() {
+ cancel()
+ })
+
+ defer func() {
+ servenv.ExitChan <- syscall.SIGTERM
+ <-ctx.Done()
+ }()
+
+ go servenv.RunDefault()
+ // Some stats plugins use OnRun to initialize. Wait for them to finish
+ // initializing before continuing, so we don't lose any stats.
+ if err := stats.AwaitBackend(ctx); err != nil {
+ return fmt.Errorf("failed to await stats backend: %w", err)
+ }
+
+ if detachedMode {
+ // this method will call os.Exit and kill this process
+ cmd.DetachFromTerminalAndExit()
+ }
+
+ defer logutil.Flush()
+
+ if minRetentionCount < 1 {
+ log.Errorf("min_retention_count must be at least 1 to allow restores to succeed")
+ exit.Return(1)
+ }
+
+	// Open connection to backup storage.
+ backupStorage, err := backupstorage.GetBackupStorage()
+ if err != nil {
+ return fmt.Errorf("Can't get backup storage: %w", err)
+ }
+ defer backupStorage.Close()
+ // Open connection to topology server.
+ topoServer := topo.Open()
+ defer topoServer.Close()
+
+ // Initialize stats.
+ for _, phaseName := range phaseNames {
+ phase.Set(phaseName, int64(0))
+ }
+ for phaseName, statuses := range phaseStatuses {
+ for _, status := range statuses {
+ phaseStatus.Set([]string{phaseName, status}, 0)
+ }
+ }
+
+ // Try to take a backup, if it's been long enough since the last one.
+ // Skip pruning if backup wasn't fully successful. We don't want to be
+ // deleting things if the backup process is not healthy.
+ backupDir := mysqlctl.GetBackupDir(initKeyspace, initShard)
+ doBackup, err := shouldBackup(ctx, topoServer, backupStorage, backupDir)
+ if err != nil {
+ return fmt.Errorf("Can't take backup: %w", err)
+ }
+ if doBackup {
+ if err := takeBackup(ctx, topoServer, backupStorage); err != nil {
+ return fmt.Errorf("Failed to take backup: %w", err)
+ }
+ }
+
+ // Prune old backups.
+ if err := pruneBackups(ctx, backupStorage, backupDir); err != nil {
+ return fmt.Errorf("Couldn't prune old backups: %w", err)
+ }
+
+ if keepAliveTimeout > 0 {
+ log.Infof("Backup was successful, waiting %s before exiting (or until context expires).", keepAliveTimeout)
+ select {
+ case <-time.After(keepAliveTimeout):
+ case <-ctx.Done():
+ }
+ }
+ log.Info("Exiting.")
+
+ return nil
+}
+
+func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage backupstorage.BackupStorage) error {
+ // This is an imaginary tablet alias. The value doesn't matter for anything,
+ // except that we generate a random UID to ensure the target backup
+ // directory is unique if multiple vtbackup instances are launched for the
+ // same shard, at exactly the same second, pointed at the same backup
+ // storage location.
+ bigN, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32))
+ if err != nil {
+ return fmt.Errorf("can't generate random tablet UID: %v", err)
+ }
+ tabletAlias := &topodatapb.TabletAlias{
+ Cell: "vtbackup",
+ Uid: uint32(bigN.Uint64()),
+ }
+
+ // Clean up our temporary data dir if we exit for any reason, to make sure
+ // every invocation of vtbackup starts with a clean slate, and it does not
+ // accumulate garbage (and run out of disk space) if it's restarted.
+ tabletDir := mysqlctl.TabletDir(tabletAlias.Uid)
+ defer func() {
+ log.Infof("Removing temporary tablet directory: %v", tabletDir)
+ if err := os.RemoveAll(tabletDir); err != nil {
+ log.Warningf("Failed to remove temporary tablet directory: %v", err)
+ }
+ }()
+
+ // Start up mysqld as if we are mysqlctld provisioning a fresh tablet.
+ mysqld, mycnf, err := mysqlctl.CreateMysqldAndMycnf(tabletAlias.Uid, mysqlSocket, mysqlPort)
+ if err != nil {
+ return fmt.Errorf("failed to initialize mysql config: %v", err)
+ }
+ initCtx, initCancel := context.WithTimeout(ctx, mysqlTimeout)
+ defer initCancel()
+ initMysqldAt := time.Now()
+ if err := mysqld.Init(initCtx, mycnf, initDBSQLFile); err != nil {
+ return fmt.Errorf("failed to initialize mysql data dir and start mysqld: %v", err)
+ }
+ deprecatedDurationByPhase.Set("InitMySQLd", int64(time.Since(initMysqldAt).Seconds()))
+ // Shut down mysqld when we're done.
+ defer func() {
+ // Be careful not to use the original context, because we don't want to
+ // skip shutdown just because we timed out waiting for other things.
+ mysqlShutdownCtx, mysqlShutdownCancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer mysqlShutdownCancel()
+ if err := mysqld.Shutdown(mysqlShutdownCtx, mycnf, false); err != nil {
+ log.Errorf("failed to shutdown mysqld: %v", err)
+ }
+ }()
+
+ extraEnv := map[string]string{
+ "TABLET_ALIAS": topoproto.TabletAliasString(tabletAlias),
+ }
+ dbName := initDbNameOverride
+ if dbName == "" {
+ dbName = fmt.Sprintf("vt_%s", initKeyspace)
+ }
+
+ backupParams := mysqlctl.BackupParams{
+ Cnf: mycnf,
+ Mysqld: mysqld,
+ Logger: logutil.NewConsoleLogger(),
+ Concurrency: concurrency,
+ IncrementalFromPos: incrementalFromPos,
+ HookExtraEnv: extraEnv,
+ TopoServer: topoServer,
+ Keyspace: initKeyspace,
+ Shard: initShard,
+ TabletAlias: topoproto.TabletAliasString(tabletAlias),
+ Stats: backupstats.BackupStats(),
+ UpgradeSafe: upgradeSafe,
+ }
+ // In initial_backup mode, just take a backup of this empty database.
+ if initialBackup {
+ // Take a backup of this empty DB without restoring anything.
+ // First, initialize it the way InitShardPrimary would, so this backup
+ // produces a result that can be used to skip InitShardPrimary entirely.
+ // This involves resetting replication (to erase any history) and then
+ // creating the main database and some Vitess system tables.
+ if err := mysqld.ResetReplication(ctx); err != nil {
+ return fmt.Errorf("can't reset replication: %v", err)
+ }
+ // We need to switch off super_read_only before we create the database.
+ resetFunc, err := mysqld.SetSuperReadOnly(false)
+ if err != nil {
+ return fmt.Errorf("failed to disable super_read_only during backup: %v", err)
+ }
+ if resetFunc != nil {
+ defer func() {
+ err := resetFunc()
+ if err != nil {
+ log.Error("Failed to set super_read_only back to its original value during backup")
+ }
+ }()
+ }
+ cmd := mysqlctl.GenerateInitialBinlogEntry()
+ if err := mysqld.ExecuteSuperQueryList(ctx, []string{cmd}); err != nil {
+ return err
+ }
+
+ backupParams.BackupTime = time.Now()
+ // Now we're ready to take the backup.
+ phase.Set(phaseNameInitialBackup, int64(1))
+ defer phase.Set(phaseNameInitialBackup, int64(0))
+ if err := mysqlctl.Backup(ctx, backupParams); err != nil {
+ return fmt.Errorf("backup failed: %v", err)
+ }
+ deprecatedDurationByPhase.Set("InitialBackup", int64(time.Since(backupParams.BackupTime).Seconds()))
+ log.Info("Initial backup successful.")
+ phase.Set(phaseNameInitialBackup, int64(0))
+ return nil
+ }
+
+ phase.Set(phaseNameRestoreLastBackup, int64(1))
+ defer phase.Set(phaseNameRestoreLastBackup, int64(0))
+ backupDir := mysqlctl.GetBackupDir(initKeyspace, initShard)
+ log.Infof("Restoring latest backup from directory %v", backupDir)
+ restoreAt := time.Now()
+ params := mysqlctl.RestoreParams{
+ Cnf: mycnf,
+ Mysqld: mysqld,
+ Logger: logutil.NewConsoleLogger(),
+ Concurrency: concurrency,
+ HookExtraEnv: extraEnv,
+ DeleteBeforeRestore: true,
+ DbName: dbName,
+ Keyspace: initKeyspace,
+ Shard: initShard,
+ Stats: backupstats.RestoreStats(),
+ }
+ backupManifest, err := mysqlctl.Restore(ctx, params)
+ var restorePos replication.Position
+ switch err {
+ case nil:
+ // if err is nil, we expect backupManifest to be non-nil
+ restorePos = backupManifest.Position
+ log.Infof("Successfully restored from backup at replication position %v", restorePos)
+ case mysqlctl.ErrNoBackup:
+ // There is no backup found, but we may be taking the initial backup of a shard
+ if !allowFirstBackup {
+ return fmt.Errorf("no backup found; not starting up empty since --initial_backup flag was not enabled")
+ }
+ restorePos = replication.Position{}
+ default:
+ return fmt.Errorf("can't restore from backup: %v", err)
+ }
+ deprecatedDurationByPhase.Set("RestoreLastBackup", int64(time.Since(restoreAt).Seconds()))
+ phase.Set(phaseNameRestoreLastBackup, int64(0))
+
+ // As of MySQL 8.0.21, you can disable redo logging using the ALTER INSTANCE
+ // DISABLE INNODB REDO_LOG statement. This functionality is intended for
+ // loading data into a new MySQL instance. Disabling redo logging speeds up
+ // data loading by avoiding redo log writes and doublewrite buffering.
+ disabledRedoLog := false
+ if disableRedoLog {
+ if err := mysqld.DisableRedoLog(ctx); err != nil {
+ log.Warningf("Error disabling redo logging: %v", err)
+ } else {
+ disabledRedoLog = true
+ }
+ }
+
+ // We have restored a backup. Now start replication.
+ if err := resetReplication(ctx, restorePos, mysqld); err != nil {
+ return fmt.Errorf("error resetting replication: %v", err)
+ }
+ if err := startReplication(ctx, mysqld, topoServer); err != nil {
+ return fmt.Errorf("error starting replication: %v", err)
+ }
+
+ log.Info("get the current primary replication position, and wait until we catch up")
+ // Get the current primary replication position, and wait until we catch up
+ // to that point. We do this instead of looking at ReplicationLag
+ // because that value can
+ // sometimes lie and tell you there's 0 lag when actually replication is
+ // stopped. Also, if replication is making progress but is too slow to ever
+ // catch up to live changes, we'd rather take a backup of something rather
+ // than timing out.
+ tmc := tmclient.NewTabletManagerClient()
+ // Keep retrying if we can't contact the primary. The primary might be
+ // changing, moving, or down temporarily.
+ var primaryPos replication.Position
+ err = retryOnError(ctx, func() error {
+ // Add a per-operation timeout so we re-read topo if the primary is unreachable.
+ opCtx, optCancel := context.WithTimeout(ctx, operationTimeout)
+ defer optCancel()
+ pos, err := getPrimaryPosition(opCtx, tmc, topoServer)
+ if err != nil {
+ return fmt.Errorf("can't get the primary replication position: %v", err)
+ }
+ primaryPos = pos
+ return nil
+ })
+ if err != nil {
+ return fmt.Errorf("can't get the primary replication position after all retries: %v", err)
+ }
+
+ log.Infof("takeBackup: primary position is: %s", primaryPos.String())
+
+ // Remember the time when we fetched the primary position, not when we caught
+ // up to it, so the timestamp on our backup is honest (assuming we make it
+ // to the goal position).
+ backupParams.BackupTime = time.Now()
+
+ // Wait for replication to catch up.
+ phase.Set(phaseNameCatchupReplication, int64(1))
+ defer phase.Set(phaseNameCatchupReplication, int64(0))
+
+ var (
+ lastStatus replication.ReplicationStatus
+ status replication.ReplicationStatus
+ statusErr error
+
+ waitStartTime = time.Now()
+ )
+ for {
+ select {
+ case <-ctx.Done():
+ return fmt.Errorf("error in replication catch up: %v", ctx.Err())
+ case <-time.After(time.Second):
+ }
+
+ lastStatus = status
+ status, statusErr = mysqld.ReplicationStatus()
+ if statusErr != nil {
+ log.Warningf("Error getting replication status: %v", statusErr)
+ continue
+ }
+ if status.Position.AtLeast(primaryPos) {
+ // We're caught up on replication to at least the point the primary
+ // was at when this vtbackup run started.
+ log.Infof("Replication caught up to %v after %v", status.Position, time.Since(waitStartTime))
+ deprecatedDurationByPhase.Set("CatchUpReplication", int64(time.Since(waitStartTime).Seconds()))
+ break
+ }
+ if !lastStatus.Position.IsZero() {
+ if status.Position.Equal(lastStatus.Position) {
+ phaseStatus.Set([]string{phaseNameCatchupReplication, phaseStatusCatchupReplicationStalled}, 1)
+ } else {
+ phaseStatus.Set([]string{phaseNameCatchupReplication, phaseStatusCatchupReplicationStalled}, 0)
+ }
+ }
+ if !status.Healthy() {
+ log.Warning("Replication has stopped before backup could be taken. Trying to restart replication.")
+ phaseStatus.Set([]string{phaseNameCatchupReplication, phaseStatusCatchupReplicationStopped}, 1)
+ if err := startReplication(ctx, mysqld, topoServer); err != nil {
+ log.Warningf("Failed to restart replication: %v", err)
+ }
+ } else {
+ phaseStatus.Set([]string{phaseNameCatchupReplication, phaseStatusCatchupReplicationStopped}, 0)
+ }
+ }
+ phase.Set(phaseNameCatchupReplication, int64(0))
+
+ // Stop replication and see where we are.
+ if err := mysqld.StopReplication(nil); err != nil {
+ return fmt.Errorf("can't stop replication: %v", err)
+ }
+
+ // Did we make any progress?
+ status, statusErr = mysqld.ReplicationStatus()
+ if statusErr != nil {
+		return fmt.Errorf("can't get replication status: %v", statusErr)
+ }
+ log.Infof("Replication caught up to %v", status.Position)
+ if !status.Position.AtLeast(primaryPos) && status.Position.Equal(restorePos) {
+ return fmt.Errorf("not taking backup: replication did not make any progress from restore point: %v", restorePos)
+ }
+ phaseStatus.Set([]string{phaseNameCatchupReplication, phaseStatusCatchupReplicationStalled}, 0)
+ phaseStatus.Set([]string{phaseNameCatchupReplication, phaseStatusCatchupReplicationStopped}, 0)
+
+ // Re-enable redo logging.
+ if disabledRedoLog {
+ if err := mysqld.EnableRedoLog(ctx); err != nil {
+ return fmt.Errorf("failed to re-enable redo log: %v", err)
+ }
+ }
+
+ if restartBeforeBackup {
+ restartAt := time.Now()
+ log.Info("Proceeding with clean MySQL shutdown and startup to flush all buffers.")
+ // Prep for full/clean shutdown (not typically the default)
+ if err := mysqld.ExecuteSuperQuery(ctx, "SET GLOBAL innodb_fast_shutdown=0"); err != nil {
+ return fmt.Errorf("Could not prep for full shutdown: %v", err)
+ }
+ // Shutdown, waiting for it to finish
+ if err := mysqld.Shutdown(ctx, mycnf, true); err != nil {
+ return fmt.Errorf("Something went wrong during full MySQL shutdown: %v", err)
+ }
+ // Start MySQL, waiting for it to come up
+ if err := mysqld.Start(ctx, mycnf); err != nil {
+ return fmt.Errorf("Could not start MySQL after full shutdown: %v", err)
+ }
+ deprecatedDurationByPhase.Set("RestartBeforeBackup", int64(time.Since(restartAt).Seconds()))
+ }
+
+ // Now we can take a new backup.
+ backupAt := time.Now()
+ phase.Set(phaseNameTakeNewBackup, int64(1))
+ defer phase.Set(phaseNameTakeNewBackup, int64(0))
+ if err := mysqlctl.Backup(ctx, backupParams); err != nil {
+ return fmt.Errorf("error taking backup: %v", err)
+ }
+ deprecatedDurationByPhase.Set("TakeNewBackup", int64(time.Since(backupAt).Seconds()))
+ phase.Set(phaseNameTakeNewBackup, int64(0))
+
+ // Return a non-zero exit code if we didn't meet the replication position
+ // goal, even though we took a backup that pushes the high-water mark up.
+ if !status.Position.AtLeast(primaryPos) {
+ return fmt.Errorf("replication caught up to %v but didn't make it to the goal of %v; a backup was taken anyway to save partial progress, but the operation should still be retried since not all expected data is backed up", status.Position, primaryPos)
+ }
+ log.Info("Backup successful.")
+ return nil
+}
+
+func resetReplication(ctx context.Context, pos replication.Position, mysqld mysqlctl.MysqlDaemon) error {
+ cmds := []string{
+ "STOP SLAVE",
+ "RESET SLAVE ALL", // "ALL" makes it forget replication source host:port.
+ }
+ if err := mysqld.ExecuteSuperQueryList(ctx, cmds); err != nil {
+ return vterrors.Wrap(err, "failed to reset replication")
+ }
+
+ // Check if we have a position to resume from, if not reset to the beginning of time
+ if !pos.IsZero() {
+ // Set the position at which to resume from the replication source.
+ if err := mysqld.SetReplicationPosition(ctx, pos); err != nil {
+ return vterrors.Wrap(err, "failed to set replica position")
+ }
+ } else {
+ if err := mysqld.ResetReplication(ctx); err != nil {
+ return vterrors.Wrap(err, "failed to reset replication")
+ }
+ }
+ return nil
+}
+
+func startReplication(ctx context.Context, mysqld mysqlctl.MysqlDaemon, topoServer *topo.Server) error {
+ si, err := topoServer.GetShard(ctx, initKeyspace, initShard)
+ if err != nil {
+ return vterrors.Wrap(err, "can't read shard")
+ }
+ if topoproto.TabletAliasIsZero(si.PrimaryAlias) {
+ // Normal tablets will sit around waiting to be reparented in this case.
+ // Since vtbackup is a batch job, we just have to fail.
+ return fmt.Errorf("can't start replication after restore: shard %v/%v has no primary", initKeyspace, initShard)
+ }
+ // TODO(enisoc): Support replicating from another replica, preferably in the
+ // same cell, preferably rdonly, to reduce load on the primary.
+ ti, err := topoServer.GetTablet(ctx, si.PrimaryAlias)
+ if err != nil {
+ return vterrors.Wrapf(err, "Cannot read primary tablet %v", si.PrimaryAlias)
+ }
+
+ // Stop replication (in case we're restarting), set replication source, and start replication.
+ if err := mysqld.SetReplicationSource(ctx, ti.Tablet.MysqlHostname, ti.Tablet.MysqlPort, true /* stopReplicationBefore */, true /* startReplicationAfter */); err != nil {
+ return vterrors.Wrap(err, "MysqlDaemon.SetReplicationSource failed")
+ }
+ return nil
+}
+
+func getPrimaryPosition(ctx context.Context, tmc tmclient.TabletManagerClient, ts *topo.Server) (replication.Position, error) {
+ si, err := ts.GetShard(ctx, initKeyspace, initShard)
+ if err != nil {
+ return replication.Position{}, vterrors.Wrap(err, "can't read shard")
+ }
+ if topoproto.TabletAliasIsZero(si.PrimaryAlias) {
+ // Normal tablets will sit around waiting to be reparented in this case.
+ // Since vtbackup is a batch job, we just have to fail.
+ return replication.Position{}, fmt.Errorf("shard %v/%v has no primary", initKeyspace, initShard)
+ }
+ ti, err := ts.GetTablet(ctx, si.PrimaryAlias)
+ if err != nil {
+ return replication.Position{}, fmt.Errorf("can't get primary tablet record %v: %v", topoproto.TabletAliasString(si.PrimaryAlias), err)
+ }
+ posStr, err := tmc.PrimaryPosition(ctx, ti.Tablet)
+ if err != nil {
+ return replication.Position{}, fmt.Errorf("can't get primary replication position: %v", err)
+ }
+ pos, err := replication.DecodePosition(posStr)
+ if err != nil {
+ return replication.Position{}, fmt.Errorf("can't decode primary replication position %q: %v", posStr, err)
+ }
+ return pos, nil
+}
+
+// retryOnError keeps calling the given function until it succeeds, or the given
+// Context is done. It waits an exponentially increasing amount of time between
+// retries to avoid hot-looping. The only time this returns an error is if the
+// Context is cancelled.
+func retryOnError(ctx context.Context, fn func() error) error {
+ waitTime := 1 * time.Second
+
+ for {
+ err := fn()
+ if err == nil {
+ return nil
+ }
+ log.Errorf("Waiting %v to retry after error: %v", waitTime, err)
+
+ select {
+ case <-ctx.Done():
+ log.Errorf("Not retrying after error: %v", ctx.Err())
+ return ctx.Err()
+ case <-time.After(waitTime):
+ waitTime *= 2
+ }
+ }
+}
+
+func pruneBackups(ctx context.Context, backupStorage backupstorage.BackupStorage, backupDir string) error {
+ if minRetentionTime == 0 {
+ log.Info("Pruning of old backups is disabled.")
+ return nil
+ }
+ backups, err := backupStorage.ListBackups(ctx, backupDir)
+ if err != nil {
+ return fmt.Errorf("can't list backups: %v", err)
+ }
+ numBackups := len(backups)
+ if numBackups <= minRetentionCount {
+ log.Infof("Found %v backups. Not pruning any since this is within the min_retention_count of %v.", numBackups, minRetentionCount)
+ return nil
+ }
+ // We have more than the minimum retention count, so we could afford to
+ // prune some. See if any are beyond the minimum retention time.
+ // ListBackups returns them sorted by oldest first.
+ for _, backup := range backups {
+ backupTime, err := parseBackupTime(backup.Name())
+ if err != nil {
+ return err
+ }
+ if time.Since(backupTime) < minRetentionTime {
+ // The oldest remaining backup is not old enough to prune.
+ log.Infof("Oldest backup taken at %v has not reached min_retention_time of %v. Nothing left to prune.", backupTime, minRetentionTime)
+ break
+ }
+ // Remove the backup.
+ log.Infof("Removing old backup %v from %v, since it's older than min_retention_time of %v", backup.Name(), backupDir, minRetentionTime)
+ if err := backupStorage.RemoveBackup(ctx, backupDir, backup.Name()); err != nil {
+ return fmt.Errorf("couldn't remove backup %v from %v: %v", backup.Name(), backupDir, err)
+ }
+ // We successfully removed one backup. Can we afford to prune any more?
+ numBackups--
+ if numBackups == minRetentionCount {
+ log.Infof("Successfully pruned backup count to min_retention_count of %v.", minRetentionCount)
+ break
+ }
+ }
+ return nil
+}
+
+func parseBackupTime(name string) (time.Time, error) {
+ // Backup names are formatted as "date.time.tablet-alias".
+ parts := strings.Split(name, ".")
+ if len(parts) != 3 {
+ return time.Time{}, fmt.Errorf("backup name not in expected format (date.time.tablet-alias): %v", name)
+ }
+ backupTime, err := time.Parse(mysqlctl.BackupTimestampFormat, fmt.Sprintf("%s.%s", parts[0], parts[1]))
+ if err != nil {
+ return time.Time{}, fmt.Errorf("can't parse timestamp from backup %q: %v", name, err)
+ }
+ return backupTime, nil
+}
+
+func shouldBackup(ctx context.Context, topoServer *topo.Server, backupStorage backupstorage.BackupStorage, backupDir string) (bool, error) {
+ // Look for the most recent, complete backup.
+ backups, err := backupStorage.ListBackups(ctx, backupDir)
+ if err != nil {
+ return false, fmt.Errorf("can't list backups: %v", err)
+ }
+ lastBackup := lastCompleteBackup(ctx, backups)
+
+ // Check preconditions for initial_backup mode.
+ if initialBackup {
+ // Check if any backups for the shard already exist in this backup storage location.
+ if lastBackup != nil {
+ log.Infof("At least one complete backup already exists, so there's no need to seed an empty backup. Doing nothing.")
+ return false, nil
+ }
+
+ // Check whether the shard exists.
+ _, shardErr := topoServer.GetShard(ctx, initKeyspace, initShard)
+ switch {
+ case shardErr == nil:
+ // If the shard exists, we should make sure none of the tablets are
+ // already in a serving state, because then they might have data
+ // that conflicts with the initial backup we're about to take.
+ tablets, err := topoServer.GetTabletMapForShard(ctx, initKeyspace, initShard)
+ if err != nil {
+ // We don't know for sure whether any tablets are serving,
+ // so it's not safe to continue.
+ return false, fmt.Errorf("failed to check whether shard %v/%v has serving tablets before doing initial backup: %v", initKeyspace, initShard, err)
+ }
+ for tabletAlias, tablet := range tablets {
+ // Check if any tablet has its type set to one of the serving types.
+ // If so, it's too late to do an initial backup.
+ if tablet.IsInServingGraph() {
+ return false, fmt.Errorf("refusing to upload initial backup of empty database: the shard %v/%v already has at least one tablet that may be serving (%v); you must take a backup from a live tablet instead", initKeyspace, initShard, tabletAlias)
+ }
+ }
+ log.Infof("Shard %v/%v exists but has no serving tablets.", initKeyspace, initShard)
+ case topo.IsErrType(shardErr, topo.NoNode):
+ // The shard doesn't exist, so we know no tablets are running.
+ log.Infof("Shard %v/%v doesn't exist; assuming it has no serving tablets.", initKeyspace, initShard)
+ default:
+ // If we encounter any other error, we don't know for sure whether
+ // the shard exists, so it's not safe to continue.
+			return false, fmt.Errorf("failed to check whether shard %v/%v exists before doing initial backup: %v", initKeyspace, initShard, shardErr)
+ }
+
+ log.Infof("Shard %v/%v has no existing backups. Creating initial backup.", initKeyspace, initShard)
+ return true, nil
+ }
+
+ // We need at least one backup so we can restore first, unless the user explicitly says we don't
+ if len(backups) == 0 && !allowFirstBackup {
+ return false, fmt.Errorf("no existing backups to restore from; backup is not possible since --initial_backup flag was not enabled")
+ }
+ if lastBackup == nil {
+ if allowFirstBackup {
+ // There's no complete backup, but we were told to take one from scratch anyway.
+ return true, nil
+ }
+ return false, fmt.Errorf("no complete backups to restore from; backup is not possible since --initial_backup flag was not enabled")
+ }
+
+ // Has it been long enough since the last complete backup to need a new one?
+ if minBackupInterval == 0 {
+ // No minimum interval is set, so always backup.
+ return true, nil
+ }
+ lastBackupTime, err := parseBackupTime(lastBackup.Name())
+ if err != nil {
+ return false, fmt.Errorf("can't check last backup time: %v", err)
+ }
+ if elapsedTime := time.Since(lastBackupTime); elapsedTime < minBackupInterval {
+ // It hasn't been long enough yet.
+ log.Infof("Skipping backup since only %v has elapsed since the last backup at %v, which is less than the min_backup_interval of %v.", elapsedTime, lastBackupTime, minBackupInterval)
+ return false, nil
+ }
+ // It has been long enough.
+ log.Infof("The last backup was taken at %v, which is older than the min_backup_interval of %v.", lastBackupTime, minBackupInterval)
+ return true, nil
+}
+
+func lastCompleteBackup(ctx context.Context, backups []backupstorage.BackupHandle) backupstorage.BackupHandle {
+ if len(backups) == 0 {
+ return nil
+ }
+
+ // Backups are sorted in ascending order by start time. Start at the end.
+ for i := len(backups) - 1; i >= 0; i-- {
+ // Check if this backup is complete by looking for the MANIFEST file,
+ // which is written at the end after all files are uploaded.
+ backup := backups[i]
+ if err := checkBackupComplete(ctx, backup); err != nil {
+ log.Warningf("Ignoring backup %v because it's incomplete: %v", backup.Name(), err)
+ continue
+ }
+ return backup
+ }
+
+ return nil
+}
+
+func checkBackupComplete(ctx context.Context, backup backupstorage.BackupHandle) error {
+ manifest, err := mysqlctl.GetBackupManifest(ctx, backup)
+ if err != nil {
+ return fmt.Errorf("can't get backup MANIFEST: %v", err)
+ }
+
+ log.Infof("Found complete backup %v taken at position %v", backup.Name(), manifest.Position.String())
+ return nil
+}
diff --git a/go/cmd/vtbackup/docgen/main.go b/go/cmd/vtbackup/docgen/main.go
new file mode 100644
index 00000000000..90aa90ffa98
--- /dev/null
+++ b/go/cmd/vtbackup/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/vtbackup/cli"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+		Use: "docgen [-d <dir>]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(cli.Main, dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/vtbackup/vtbackup.go b/go/cmd/vtbackup/vtbackup.go
index ff99ab249e1..37dcadc9b19 100644
--- a/go/cmd/vtbackup/vtbackup.go
+++ b/go/cmd/vtbackup/vtbackup.go
@@ -14,763 +14,19 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-/*
-vtbackup is a batch command to perform a single pass of backup maintenance for a shard.
-
-When run periodically for each shard, vtbackup can ensure these configurable policies:
-* There is always a recent backup for the shard.
-* Old backups for the shard are removed.
-
-Whatever system launches vtbackup is responsible for the following:
- - Running vtbackup with similar flags that would be used for a vttablet and
- mysqlctld in the target shard to be backed up.
- - Provisioning as much disk space for vtbackup as would be given to vttablet.
- The data directory MUST be empty at startup. Do NOT reuse a persistent disk.
- - Running vtbackup periodically for each shard, for each backup storage location.
- - Ensuring that at most one instance runs at a time for a given pair of shard
- and backup storage location.
- - Retrying vtbackup if it fails.
- - Alerting human operators if the failure is persistent.
-
-The process vtbackup follows to take a new backup is as follows:
- 1. Restore from the most recent backup.
- 2. Start a mysqld instance (but no vttablet) from the restored data.
- 3. Instruct mysqld to connect to the current shard primary and replicate any
- transactions that are new since the last backup.
- 4. Ask the primary for its current replication position and set that as the goal
- for catching up on replication before taking the backup, so the goalposts
- don't move.
- 5. Wait until replication is caught up to the goal position or beyond.
- 6. Stop mysqld and take a new backup.
-
-Aside from additional replication load while vtbackup's mysqld catches up on
-new transactions, the shard should be otherwise unaffected. Existing tablets
-will continue to serve, and no new tablets will appear in topology, meaning no
-query traffic will ever be routed to vtbackup's mysqld. This silent operation
-mode helps make backups minimally disruptive to serving capacity and orthogonal
-to the handling of the query path.
-
-The command-line parameters to vtbackup specify a policy for when a new backup
-is needed, and when old backups should be removed. If the existing backups
-already satisfy the policy, then vtbackup will do nothing and return success
-immediately.
-*/
package main
import (
- "context"
- "crypto/rand"
- "fmt"
- "math"
- "math/big"
- "os"
- "strings"
- "syscall"
- "time"
-
- "github.com/spf13/pflag"
-
- "vitess.io/vitess/go/acl"
- "vitess.io/vitess/go/cmd"
+ "vitess.io/vitess/go/cmd/vtbackup/cli"
"vitess.io/vitess/go/exit"
- "vitess.io/vitess/go/mysql"
- "vitess.io/vitess/go/stats"
- "vitess.io/vitess/go/vt/dbconfigs"
"vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/logutil"
- "vitess.io/vitess/go/vt/mysqlctl"
- "vitess.io/vitess/go/vt/mysqlctl/backupstats"
- "vitess.io/vitess/go/vt/mysqlctl/backupstorage"
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/topo"
- "vitess.io/vitess/go/vt/topo/topoproto"
- "vitess.io/vitess/go/vt/vterrors"
- _ "vitess.io/vitess/go/vt/vttablet/grpctmclient"
- "vitess.io/vitess/go/vt/vttablet/tmclient"
)
-const (
- // operationTimeout is the timeout for individual operations like fetching
- // the primary position. This does not impose an overall timeout on
- // long-running processes like taking the backup. It only applies to
- // steps along the way that should complete quickly. This ensures we don't
- // place a hard cap on the overall time for a backup, while also not waiting
- // forever for things that should be quick.
- operationTimeout = 1 * time.Minute
-)
-
-var (
- minBackupInterval time.Duration
- minRetentionTime time.Duration
- minRetentionCount = 1
- initialBackup bool
- allowFirstBackup bool
- restartBeforeBackup bool
- // vttablet-like flags
- initDbNameOverride string
- initKeyspace string
- initShard string
- concurrency = 4
- incrementalFromPos string
- // mysqlctld-like flags
- mysqlPort = 3306
- mysqlSocket string
- mysqlTimeout = 5 * time.Minute
- initDBSQLFile string
- detachedMode bool
- keepAliveTimeout = 0 * time.Second
- disableRedoLog = false
- durationByPhase = stats.NewGaugesWithSingleLabel(
- "DurationByPhaseSeconds",
- "How long it took vtbackup to perform each phase (in seconds).",
- "phase",
- )
-)
-
-func registerFlags(fs *pflag.FlagSet) {
- fs.DurationVar(&minBackupInterval, "min_backup_interval", minBackupInterval, "Only take a new backup if it's been at least this long since the most recent backup.")
- fs.DurationVar(&minRetentionTime, "min_retention_time", minRetentionTime, "Keep each old backup for at least this long before removing it. Set to 0 to disable pruning of old backups.")
- fs.IntVar(&minRetentionCount, "min_retention_count", minRetentionCount, "Always keep at least this many of the most recent backups in this backup storage location, even if some are older than the min_retention_time. This must be at least 1 since a backup must always exist to allow new backups to be made")
- fs.BoolVar(&initialBackup, "initial_backup", initialBackup, "Instead of restoring from backup, initialize an empty database with the provided init_db_sql_file and upload a backup of that for the shard, if the shard has no backups yet. This can be used to seed a brand new shard with an initial, empty backup. If any backups already exist for the shard, this will be considered a successful no-op. This can only be done before the shard exists in topology (i.e. before any tablets are deployed).")
- fs.BoolVar(&allowFirstBackup, "allow_first_backup", allowFirstBackup, "Allow this job to take the first backup of an existing shard.")
- fs.BoolVar(&restartBeforeBackup, "restart_before_backup", restartBeforeBackup, "Perform a mysqld clean/full restart after applying binlogs, but before taking the backup. Only makes sense to work around xtrabackup bugs.")
- // vttablet-like flags
- fs.StringVar(&initDbNameOverride, "init_db_name_override", initDbNameOverride, "(init parameter) override the name of the db used by vttablet")
- fs.StringVar(&initKeyspace, "init_keyspace", initKeyspace, "(init parameter) keyspace to use for this tablet")
- fs.StringVar(&initShard, "init_shard", initShard, "(init parameter) shard to use for this tablet")
- fs.IntVar(&concurrency, "concurrency", concurrency, "(init restore parameter) how many concurrent files to restore at once")
- fs.StringVar(&incrementalFromPos, "incremental_from_pos", incrementalFromPos, "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position")
- // mysqlctld-like flags
- fs.IntVar(&mysqlPort, "mysql_port", mysqlPort, "mysql port")
- fs.StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "path to the mysql socket")
- fs.DurationVar(&mysqlTimeout, "mysql_timeout", mysqlTimeout, "how long to wait for mysqld startup")
- fs.StringVar(&initDBSQLFile, "init_db_sql_file", initDBSQLFile, "path to .sql file to run after mysql_install_db")
- fs.BoolVar(&detachedMode, "detach", detachedMode, "detached mode - run backups detached from the terminal")
- fs.DurationVar(&keepAliveTimeout, "keep-alive-timeout", keepAliveTimeout, "Wait until timeout elapses after a successful backup before shutting down.")
- fs.BoolVar(&disableRedoLog, "disable-redo-log", disableRedoLog, "Disable InnoDB redo log during replication-from-primary phase of backup.")
-
- acl.RegisterFlags(fs)
-}
-
-func init() {
- servenv.RegisterDefaultFlags()
- dbconfigs.RegisterFlags(dbconfigs.All...)
- mysqlctl.RegisterFlags()
- servenv.OnParse(registerFlags)
-}
-
func main() {
defer exit.Recover()
- servenv.ParseFlags("vtbackup")
- servenv.Init()
- ctx, cancel := context.WithCancel(context.Background())
- servenv.OnClose(func() {
- cancel()
- })
-
- defer func() {
- servenv.ExitChan <- syscall.SIGTERM
- <-ctx.Done()
- }()
-
- go servenv.RunDefault()
-
- if detachedMode {
- // this method will call os.Exit and kill this process
- cmd.DetachFromTerminalAndExit()
- }
-
- defer logutil.Flush()
-
- if minRetentionCount < 1 {
- log.Errorf("min_retention_count must be at least 1 to allow restores to succeed")
- exit.Return(1)
- }
-
- // Open connection backup storage.
- backupStorage, err := backupstorage.GetBackupStorage()
- if err != nil {
- log.Errorf("Can't get backup storage: %v", err)
+ if err := cli.Main.Execute(); err != nil {
+ log.Error(err)
exit.Return(1)
}
- defer backupStorage.Close()
- // Open connection to topology server.
- topoServer := topo.Open()
- defer topoServer.Close()
-
- // Try to take a backup, if it's been long enough since the last one.
- // Skip pruning if backup wasn't fully successful. We don't want to be
- // deleting things if the backup process is not healthy.
- backupDir := mysqlctl.GetBackupDir(initKeyspace, initShard)
- doBackup, err := shouldBackup(ctx, topoServer, backupStorage, backupDir)
- if err != nil {
- log.Errorf("Can't take backup: %v", err)
- exit.Return(1)
- }
- if doBackup {
- if err := takeBackup(ctx, topoServer, backupStorage); err != nil {
- log.Errorf("Failed to take backup: %v", err)
- exit.Return(1)
- }
- }
-
- // Prune old backups.
- if err := pruneBackups(ctx, backupStorage, backupDir); err != nil {
- log.Errorf("Couldn't prune old backups: %v", err)
- exit.Return(1)
- }
-
- if keepAliveTimeout > 0 {
- log.Infof("Backup was successful, waiting %s before exiting (or until context expires).", keepAliveTimeout)
- select {
- case <-time.After(keepAliveTimeout):
- case <-ctx.Done():
- }
- }
- log.Info("Exiting.")
-}
-
-func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage backupstorage.BackupStorage) error {
- // This is an imaginary tablet alias. The value doesn't matter for anything,
- // except that we generate a random UID to ensure the target backup
- // directory is unique if multiple vtbackup instances are launched for the
- // same shard, at exactly the same second, pointed at the same backup
- // storage location.
- bigN, err := rand.Int(rand.Reader, big.NewInt(math.MaxUint32))
- if err != nil {
- return fmt.Errorf("can't generate random tablet UID: %v", err)
- }
- tabletAlias := &topodatapb.TabletAlias{
- Cell: "vtbackup",
- Uid: uint32(bigN.Uint64()),
- }
-
- // Clean up our temporary data dir if we exit for any reason, to make sure
- // every invocation of vtbackup starts with a clean slate, and it does not
- // accumulate garbage (and run out of disk space) if it's restarted.
- tabletDir := mysqlctl.TabletDir(tabletAlias.Uid)
- defer func() {
- log.Infof("Removing temporary tablet directory: %v", tabletDir)
- if err := os.RemoveAll(tabletDir); err != nil {
- log.Warningf("Failed to remove temporary tablet directory: %v", err)
- }
- }()
-
- // Start up mysqld as if we are mysqlctld provisioning a fresh tablet.
- mysqld, mycnf, err := mysqlctl.CreateMysqldAndMycnf(tabletAlias.Uid, mysqlSocket, mysqlPort)
- if err != nil {
- return fmt.Errorf("failed to initialize mysql config: %v", err)
- }
- initCtx, initCancel := context.WithTimeout(ctx, mysqlTimeout)
- defer initCancel()
- initMysqldAt := time.Now()
- if err := mysqld.Init(initCtx, mycnf, initDBSQLFile); err != nil {
- return fmt.Errorf("failed to initialize mysql data dir and start mysqld: %v", err)
- }
- durationByPhase.Set("InitMySQLd", int64(time.Since(initMysqldAt).Seconds()))
- // Shut down mysqld when we're done.
- defer func() {
- // Be careful not to use the original context, because we don't want to
- // skip shutdown just because we timed out waiting for other things.
- mysqlShutdownCtx, mysqlShutdownCancel := context.WithTimeout(context.Background(), 30*time.Second)
- defer mysqlShutdownCancel()
- if err := mysqld.Shutdown(mysqlShutdownCtx, mycnf, false); err != nil {
- log.Errorf("failed to shutdown mysqld: %v", err)
- }
- }()
-
- extraEnv := map[string]string{
- "TABLET_ALIAS": topoproto.TabletAliasString(tabletAlias),
- }
- dbName := initDbNameOverride
- if dbName == "" {
- dbName = fmt.Sprintf("%s%s", topoproto.VtDbPrefix, initKeyspace)
- }
-
- backupParams := mysqlctl.BackupParams{
- Cnf: mycnf,
- Mysqld: mysqld,
- Logger: logutil.NewConsoleLogger(),
- Concurrency: concurrency,
- IncrementalFromPos: incrementalFromPos,
- HookExtraEnv: extraEnv,
- TopoServer: topoServer,
- Keyspace: initKeyspace,
- Shard: initShard,
- TabletAlias: topoproto.TabletAliasString(tabletAlias),
- Stats: backupstats.BackupStats(),
- }
- // In initial_backup mode, just take a backup of this empty database.
- if initialBackup {
- // Take a backup of this empty DB without restoring anything.
- // First, initialize it the way InitShardPrimary would, so this backup
- // produces a result that can be used to skip InitShardPrimary entirely.
- // This involves resetting replication (to erase any history) and then
- // creating the main database and some Vitess system tables.
- if err := mysqld.ResetReplication(ctx); err != nil {
- return fmt.Errorf("can't reset replication: %v", err)
- }
- // We need to switch off super_read_only before we create the database.
- resetFunc, err := mysqld.SetSuperReadOnly(false)
- if err != nil {
- return fmt.Errorf("failed to disable super_read_only during backup: %v", err)
- }
- if resetFunc != nil {
- defer func() {
- err := resetFunc()
- if err != nil {
- log.Error("Failed to set super_read_only back to its original value during backup")
- }
- }()
- }
- cmd := mysqlctl.GenerateInitialBinlogEntry()
- if err := mysqld.ExecuteSuperQueryList(ctx, []string{cmd}); err != nil {
- return err
- }
-
- backupParams.BackupTime = time.Now()
- // Now we're ready to take the backup.
- if err := mysqlctl.Backup(ctx, backupParams); err != nil {
- return fmt.Errorf("backup failed: %v", err)
- }
- durationByPhase.Set("InitialBackup", int64(time.Since(backupParams.BackupTime).Seconds()))
- log.Info("Initial backup successful.")
- return nil
- }
-
- backupDir := mysqlctl.GetBackupDir(initKeyspace, initShard)
- log.Infof("Restoring latest backup from directory %v", backupDir)
- restoreAt := time.Now()
- params := mysqlctl.RestoreParams{
- Cnf: mycnf,
- Mysqld: mysqld,
- Logger: logutil.NewConsoleLogger(),
- Concurrency: concurrency,
- HookExtraEnv: extraEnv,
- DeleteBeforeRestore: true,
- DbName: dbName,
- Keyspace: initKeyspace,
- Shard: initShard,
- Stats: backupstats.RestoreStats(),
- }
- backupManifest, err := mysqlctl.Restore(ctx, params)
- var restorePos mysql.Position
- switch err {
- case nil:
- // if err is nil, we expect backupManifest to be non-nil
- restorePos = backupManifest.Position
- log.Infof("Successfully restored from backup at replication position %v", restorePos)
- case mysqlctl.ErrNoBackup:
- // There is no backup found, but we may be taking the initial backup of a shard
- if !allowFirstBackup {
- return fmt.Errorf("no backup found; not starting up empty since --initial_backup flag was not enabled")
- }
- restorePos = mysql.Position{}
- default:
- return fmt.Errorf("can't restore from backup: %v", err)
- }
- durationByPhase.Set("RestoreLastBackup", int64(time.Since(restoreAt).Seconds()))
-
- // As of MySQL 8.0.21, you can disable redo logging using the ALTER INSTANCE
- // DISABLE INNODB REDO_LOG statement. This functionality is intended for
- // loading data into a new MySQL instance. Disabling redo logging speeds up
- // data loading by avoiding redo log writes and doublewrite buffering.
- disabledRedoLog := false
- if disableRedoLog {
- if err := mysqld.DisableRedoLog(ctx); err != nil {
- log.Warningf("Error disabling redo logging: %v", err)
- } else {
- disabledRedoLog = true
- }
- }
-
- // We have restored a backup. Now start replication.
- if err := resetReplication(ctx, restorePos, mysqld); err != nil {
- return fmt.Errorf("error resetting replication: %v", err)
- }
- if err := startReplication(ctx, mysqld, topoServer); err != nil {
- return fmt.Errorf("error starting replication: %v", err)
- }
-
- log.Info("get the current primary replication position, and wait until we catch up")
- // Get the current primary replication position, and wait until we catch up
- // to that point. We do this instead of looking at ReplicationLag
- // because that value can
- // sometimes lie and tell you there's 0 lag when actually replication is
- // stopped. Also, if replication is making progress but is too slow to ever
- // catch up to live changes, we'd rather take a backup of something rather
- // than timing out.
- tmc := tmclient.NewTabletManagerClient()
- // Keep retrying if we can't contact the primary. The primary might be
- // changing, moving, or down temporarily.
- var primaryPos mysql.Position
- err = retryOnError(ctx, func() error {
- // Add a per-operation timeout so we re-read topo if the primary is unreachable.
- opCtx, optCancel := context.WithTimeout(ctx, operationTimeout)
- defer optCancel()
- pos, err := getPrimaryPosition(opCtx, tmc, topoServer)
- if err != nil {
- return fmt.Errorf("can't get the primary replication position: %v", err)
- }
- primaryPos = pos
- return nil
- })
- if err != nil {
- return fmt.Errorf("can't get the primary replication position after all retries: %v", err)
- }
-
- log.Infof("takeBackup: primary position is: %s", primaryPos.String())
-
- // Remember the time when we fetched the primary position, not when we caught
- // up to it, so the timestamp on our backup is honest (assuming we make it
- // to the goal position).
- backupParams.BackupTime = time.Now()
-
- // Wait for replication to catch up.
- waitStartTime := time.Now()
- for {
- select {
- case <-ctx.Done():
- return fmt.Errorf("error in replication catch up: %v", ctx.Err())
- case <-time.After(time.Second):
- }
-
- status, statusErr := mysqld.ReplicationStatus()
- if statusErr != nil {
- log.Warningf("Error getting replication status: %v", statusErr)
- continue
- }
- if status.Position.AtLeast(primaryPos) {
- // We're caught up on replication to at least the point the primary
- // was at when this vtbackup run started.
- log.Infof("Replication caught up to %v after %v", status.Position, time.Since(waitStartTime))
- durationByPhase.Set("CatchUpReplication", int64(time.Since(waitStartTime).Seconds()))
- break
- }
- if !status.Healthy() {
- log.Warning("Replication has stopped before backup could be taken. Trying to restart replication.")
- if err := startReplication(ctx, mysqld, topoServer); err != nil {
- log.Warningf("Failed to restart replication: %v", err)
- }
- }
- }
-
- // Stop replication and see where we are.
- if err := mysqld.StopReplication(nil); err != nil {
- return fmt.Errorf("can't stop replication: %v", err)
- }
-
- // Did we make any progress?
- status, err := mysqld.ReplicationStatus()
- if err != nil {
- return fmt.Errorf("can't get replication status: %v", err)
- }
- log.Infof("Replication caught up to %v", status.Position)
- if !status.Position.AtLeast(primaryPos) && status.Position.Equal(restorePos) {
- return fmt.Errorf("not taking backup: replication did not make any progress from restore point: %v", restorePos)
- }
-
- // Re-enable redo logging.
- if disabledRedoLog {
- if err := mysqld.EnableRedoLog(ctx); err != nil {
- return fmt.Errorf("failed to re-enable redo log: %v", err)
- }
- }
-
- if restartBeforeBackup {
- restartAt := time.Now()
- log.Info("Proceeding with clean MySQL shutdown and startup to flush all buffers.")
- // Prep for full/clean shutdown (not typically the default)
- if err := mysqld.ExecuteSuperQuery(ctx, "SET GLOBAL innodb_fast_shutdown=0"); err != nil {
- return fmt.Errorf("Could not prep for full shutdown: %v", err)
- }
- // Shutdown, waiting for it to finish
- if err := mysqld.Shutdown(ctx, mycnf, true); err != nil {
- return fmt.Errorf("Something went wrong during full MySQL shutdown: %v", err)
- }
- // Start MySQL, waiting for it to come up
- if err := mysqld.Start(ctx, mycnf); err != nil {
- return fmt.Errorf("Could not start MySQL after full shutdown: %v", err)
- }
- durationByPhase.Set("RestartBeforeBackup", int64(time.Since(restartAt).Seconds()))
- }
-
- // Now we can take a new backup.
- backupAt := time.Now()
- if err := mysqlctl.Backup(ctx, backupParams); err != nil {
- return fmt.Errorf("error taking backup: %v", err)
- }
- durationByPhase.Set("TakeNewBackup", int64(time.Since(backupAt).Seconds()))
-
- // Return a non-zero exit code if we didn't meet the replication position
- // goal, even though we took a backup that pushes the high-water mark up.
- if !status.Position.AtLeast(primaryPos) {
- return fmt.Errorf("replication caught up to %v but didn't make it to the goal of %v; a backup was taken anyway to save partial progress, but the operation should still be retried since not all expected data is backed up", status.Position, primaryPos)
- }
- log.Info("Backup successful.")
- return nil
-}
-
-func resetReplication(ctx context.Context, pos mysql.Position, mysqld mysqlctl.MysqlDaemon) error {
- cmds := []string{
- "STOP SLAVE",
- "RESET SLAVE ALL", // "ALL" makes it forget replication source host:port.
- }
- if err := mysqld.ExecuteSuperQueryList(ctx, cmds); err != nil {
- return vterrors.Wrap(err, "failed to reset replication")
- }
-
- // Check if we have a position to resume from, if not reset to the beginning of time
- if !pos.IsZero() {
- // Set the position at which to resume from the replication source.
- if err := mysqld.SetReplicationPosition(ctx, pos); err != nil {
- return vterrors.Wrap(err, "failed to set replica position")
- }
- } else {
- if err := mysqld.ResetReplication(ctx); err != nil {
- return vterrors.Wrap(err, "failed to reset replication")
- }
- }
- return nil
-}
-
-func startReplication(ctx context.Context, mysqld mysqlctl.MysqlDaemon, topoServer *topo.Server) error {
- si, err := topoServer.GetShard(ctx, initKeyspace, initShard)
- if err != nil {
- return vterrors.Wrap(err, "can't read shard")
- }
- if topoproto.TabletAliasIsZero(si.PrimaryAlias) {
- // Normal tablets will sit around waiting to be reparented in this case.
- // Since vtbackup is a batch job, we just have to fail.
- return fmt.Errorf("can't start replication after restore: shard %v/%v has no primary", initKeyspace, initShard)
- }
- // TODO(enisoc): Support replicating from another replica, preferably in the
- // same cell, preferably rdonly, to reduce load on the primary.
- ti, err := topoServer.GetTablet(ctx, si.PrimaryAlias)
- if err != nil {
- return vterrors.Wrapf(err, "Cannot read primary tablet %v", si.PrimaryAlias)
- }
-
- // Stop replication (in case we're restarting), set replication source, and start replication.
- if err := mysqld.SetReplicationSource(ctx, ti.Tablet.MysqlHostname, ti.Tablet.MysqlPort, true /* stopReplicationBefore */, true /* startReplicationAfter */); err != nil {
- return vterrors.Wrap(err, "MysqlDaemon.SetReplicationSource failed")
- }
- return nil
-}
-
-func getPrimaryPosition(ctx context.Context, tmc tmclient.TabletManagerClient, ts *topo.Server) (mysql.Position, error) {
- si, err := ts.GetShard(ctx, initKeyspace, initShard)
- if err != nil {
- return mysql.Position{}, vterrors.Wrap(err, "can't read shard")
- }
- if topoproto.TabletAliasIsZero(si.PrimaryAlias) {
- // Normal tablets will sit around waiting to be reparented in this case.
- // Since vtbackup is a batch job, we just have to fail.
- return mysql.Position{}, fmt.Errorf("shard %v/%v has no primary", initKeyspace, initShard)
- }
- ti, err := ts.GetTablet(ctx, si.PrimaryAlias)
- if err != nil {
- return mysql.Position{}, fmt.Errorf("can't get primary tablet record %v: %v", topoproto.TabletAliasString(si.PrimaryAlias), err)
- }
- posStr, err := tmc.PrimaryPosition(ctx, ti.Tablet)
- if err != nil {
- return mysql.Position{}, fmt.Errorf("can't get primary replication position: %v", err)
- }
- pos, err := mysql.DecodePosition(posStr)
- if err != nil {
- return mysql.Position{}, fmt.Errorf("can't decode primary replication position %q: %v", posStr, err)
- }
- return pos, nil
-}
-
-// retryOnError keeps calling the given function until it succeeds, or the given
-// Context is done. It waits an exponentially increasing amount of time between
-// retries to avoid hot-looping. The only time this returns an error is if the
-// Context is cancelled.
-func retryOnError(ctx context.Context, fn func() error) error {
- waitTime := 1 * time.Second
-
- for {
- err := fn()
- if err == nil {
- return nil
- }
- log.Errorf("Waiting %v to retry after error: %v", waitTime, err)
-
- select {
- case <-ctx.Done():
- log.Errorf("Not retrying after error: %v", ctx.Err())
- return ctx.Err()
- case <-time.After(waitTime):
- waitTime *= 2
- }
- }
-}
-
-func pruneBackups(ctx context.Context, backupStorage backupstorage.BackupStorage, backupDir string) error {
- if minRetentionTime == 0 {
- log.Info("Pruning of old backups is disabled.")
- return nil
- }
- backups, err := backupStorage.ListBackups(ctx, backupDir)
- if err != nil {
- return fmt.Errorf("can't list backups: %v", err)
- }
- numBackups := len(backups)
- if numBackups <= minRetentionCount {
- log.Infof("Found %v backups. Not pruning any since this is within the min_retention_count of %v.", numBackups, minRetentionCount)
- return nil
- }
- // We have more than the minimum retention count, so we could afford to
- // prune some. See if any are beyond the minimum retention time.
- // ListBackups returns them sorted by oldest first.
- for _, backup := range backups {
- backupTime, err := parseBackupTime(backup.Name())
- if err != nil {
- return err
- }
- if time.Since(backupTime) < minRetentionTime {
- // The oldest remaining backup is not old enough to prune.
- log.Infof("Oldest backup taken at %v has not reached min_retention_time of %v. Nothing left to prune.", backupTime, minRetentionTime)
- break
- }
- // Remove the backup.
- log.Infof("Removing old backup %v from %v, since it's older than min_retention_time of %v", backup.Name(), backupDir, minRetentionTime)
- if err := backupStorage.RemoveBackup(ctx, backupDir, backup.Name()); err != nil {
- return fmt.Errorf("couldn't remove backup %v from %v: %v", backup.Name(), backupDir, err)
- }
- // We successfully removed one backup. Can we afford to prune any more?
- numBackups--
- if numBackups == minRetentionCount {
- log.Infof("Successfully pruned backup count to min_retention_count of %v.", minRetentionCount)
- break
- }
- }
- return nil
-}
-
-func parseBackupTime(name string) (time.Time, error) {
- // Backup names are formatted as "date.time.tablet-alias".
- parts := strings.Split(name, ".")
- if len(parts) != 3 {
- return time.Time{}, fmt.Errorf("backup name not in expected format (date.time.tablet-alias): %v", name)
- }
- backupTime, err := time.Parse(mysqlctl.BackupTimestampFormat, fmt.Sprintf("%s.%s", parts[0], parts[1]))
- if err != nil {
- return time.Time{}, fmt.Errorf("can't parse timestamp from backup %q: %v", name, err)
- }
- return backupTime, nil
-}
-
-func shouldBackup(ctx context.Context, topoServer *topo.Server, backupStorage backupstorage.BackupStorage, backupDir string) (bool, error) {
- // Look for the most recent, complete backup.
- backups, err := backupStorage.ListBackups(ctx, backupDir)
- if err != nil {
- return false, fmt.Errorf("can't list backups: %v", err)
- }
- lastBackup := lastCompleteBackup(ctx, backups)
-
- // Check preconditions for initial_backup mode.
- if initialBackup {
- // Check if any backups for the shard already exist in this backup storage location.
- if lastBackup != nil {
- log.Infof("At least one complete backup already exists, so there's no need to seed an empty backup. Doing nothing.")
- return false, nil
- }
-
- // Check whether the shard exists.
- _, shardErr := topoServer.GetShard(ctx, initKeyspace, initShard)
- switch {
- case shardErr == nil:
- // If the shard exists, we should make sure none of the tablets are
- // already in a serving state, because then they might have data
- // that conflicts with the initial backup we're about to take.
- tablets, err := topoServer.GetTabletMapForShard(ctx, initKeyspace, initShard)
- if err != nil {
- // We don't know for sure whether any tablets are serving,
- // so it's not safe to continue.
- return false, fmt.Errorf("failed to check whether shard %v/%v has serving tablets before doing initial backup: %v", initKeyspace, initShard, err)
- }
- for tabletAlias, tablet := range tablets {
- // Check if any tablet has its type set to one of the serving types.
- // If so, it's too late to do an initial backup.
- if tablet.IsInServingGraph() {
- return false, fmt.Errorf("refusing to upload initial backup of empty database: the shard %v/%v already has at least one tablet that may be serving (%v); you must take a backup from a live tablet instead", initKeyspace, initShard, tabletAlias)
- }
- }
- log.Infof("Shard %v/%v exists but has no serving tablets.", initKeyspace, initShard)
- case topo.IsErrType(shardErr, topo.NoNode):
- // The shard doesn't exist, so we know no tablets are running.
- log.Infof("Shard %v/%v doesn't exist; assuming it has no serving tablets.", initKeyspace, initShard)
- default:
- // If we encounter any other error, we don't know for sure whether
- // the shard exists, so it's not safe to continue.
- return false, fmt.Errorf("failed to check whether shard %v/%v exists before doing initial backup: %v", initKeyspace, initShard, err)
- }
-
- log.Infof("Shard %v/%v has no existing backups. Creating initial backup.", initKeyspace, initShard)
- return true, nil
- }
-
- // We need at least one backup so we can restore first, unless the user explicitly says we don't
- if len(backups) == 0 && !allowFirstBackup {
- return false, fmt.Errorf("no existing backups to restore from; backup is not possible since --initial_backup flag was not enabled")
- }
- if lastBackup == nil {
- if allowFirstBackup {
- // There's no complete backup, but we were told to take one from scratch anyway.
- return true, nil
- }
- return false, fmt.Errorf("no complete backups to restore from; backup is not possible since --initial_backup flag was not enabled")
- }
-
- // Has it been long enough since the last complete backup to need a new one?
- if minBackupInterval == 0 {
- // No minimum interval is set, so always backup.
- return true, nil
- }
- lastBackupTime, err := parseBackupTime(lastBackup.Name())
- if err != nil {
- return false, fmt.Errorf("can't check last backup time: %v", err)
- }
- if elapsedTime := time.Since(lastBackupTime); elapsedTime < minBackupInterval {
- // It hasn't been long enough yet.
- log.Infof("Skipping backup since only %v has elapsed since the last backup at %v, which is less than the min_backup_interval of %v.", elapsedTime, lastBackupTime, minBackupInterval)
- return false, nil
- }
- // It has been long enough.
- log.Infof("The last backup was taken at %v, which is older than the min_backup_interval of %v.", lastBackupTime, minBackupInterval)
- return true, nil
-}
-
-func lastCompleteBackup(ctx context.Context, backups []backupstorage.BackupHandle) backupstorage.BackupHandle {
- if len(backups) == 0 {
- return nil
- }
-
- // Backups are sorted in ascending order by start time. Start at the end.
- for i := len(backups) - 1; i >= 0; i-- {
- // Check if this backup is complete by looking for the MANIFEST file,
- // which is written at the end after all files are uploaded.
- backup := backups[i]
- if err := checkBackupComplete(ctx, backup); err != nil {
- log.Warningf("Ignoring backup %v because it's incomplete: %v", backup.Name(), err)
- continue
- }
- return backup
- }
-
- return nil
-}
-
-func checkBackupComplete(ctx context.Context, backup backupstorage.BackupHandle) error {
- manifest, err := mysqlctl.GetBackupManifest(ctx, backup)
- if err != nil {
- return fmt.Errorf("can't get backup MANIFEST: %v", err)
- }
-
- log.Infof("Found complete backup %v taken at position %v", backup.Name(), manifest.Position.String())
- return nil
}
diff --git a/go/cmd/vtbench/cli/vtbench.go b/go/cmd/vtbench/cli/vtbench.go
new file mode 100644
index 00000000000..69b866bb60d
--- /dev/null
+++ b/go/cmd/vtbench/cli/vtbench.go
@@ -0,0 +1,246 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/vt/dbconfigs"
+ "vitess.io/vitess/go/vt/grpccommon"
+ "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vtbench"
+
+ // Import and register the gRPC vtgateconn client
+ _ "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn"
+ // Import and register the gRPC tabletconn client
+ _ "vitess.io/vitess/go/vt/vttablet/grpctabletconn"
+)
+
+/*
+
+ Vtbench is a simple load testing client to compare workloads in
+ Vitess across the various client/server protocols.
+
+ There are a number of command line options to control the behavior,
+ but as a basic example, the three supported client protocols are:
+
+ Mysql protocol to vtgate:
+ vtbench \
+ --protocol mysql \
+ --host vtgate-host.my.domain \
+ --port 15306 \
+ --user db_username \
+ --db-credentials-file ./vtbench_db_creds.json \
+ --db @replica \
+ --sql "select * from loadtest_table where id=123456789" \
+ --threads 10 \
+ --count 10
+
+ GRPC to vtgate:
+ vtbench \
+ --protocol grpc-vtgate \
+ --host vtgate-host.my.domain \
+ --port 15999 \
+ --db @replica \
+ $VTTABLET_GRPC_ARGS \
+ --sql "select * from loadtest_table where id=123456789" \
+ --threads 10 \
+ --count 10
+
+ GRPC to vttablet:
+ vtbench \
+ --protocol grpc-vttablet \
+ --host tablet-loadtest-00-80.my.domain \
+ --port 15999 \
+ --db loadtest/00-80@replica \
+ --sql "select * from loadtest_table where id=123456789" \
+ --threads 10 \
+ --count 10
+
+*/
+
+var (
+ host, unixSocket, user, db, sql string
+ port int
+ protocol = "mysql"
+ deadline = 5 * time.Minute
+ threads = 2
+ count = 1000
+
+ Main = &cobra.Command{
+ Use: "vtbench",
+ Short: "vtbench is a simple load testing client to compare workloads in Vitess across the various client/server protocols.",
+ Example: `There are a number of command line options to control the behavior,
+but as a basic example, the three supported client protocols are:
+
+Mysql protocol to vtgate:
+vtbench \
+ --protocol mysql \
+ --host vtgate-host.my.domain \
+ --port 15306 \
+ --user db_username \
+ --db-credentials-file ./vtbench_db_creds.json \
+ --db @replica \
+ --sql "select * from loadtest_table where id=123456789" \
+ --threads 10 \
+ --count 10
+
+GRPC to vtgate:
+vtbench \
+ --protocol grpc-vtgate \
+ --host vtgate-host.my.domain \
+ --port 15999 \
+ --db @replica \
+ $VTTABLET_GRPC_ARGS \
+ --sql "select * from loadtest_table where id=123456789" \
+ --threads 10 \
+ --count 10
+
+GRPC to vttablet:
+vtbench \
+ --protocol grpc-vttablet \
+ --host tablet-loadtest-00-80.my.domain \
+ --port 15999 \
+ --db loadtest/00-80@replica \
+ --sql "select * from loadtest_table where id=123456789" \
+ --threads 10 \
+ --count 10`,
+ Args: cobra.NoArgs,
+ Version: servenv.AppVersion.String(),
+ PreRunE: servenv.CobraPreRunE,
+ RunE: run,
+ }
+)
+
+func init() {
+ servenv.MoveFlagsToCobraCommand(Main)
+
+ Main.Flags().StringVar(&host, "host", host, "VTGate host(s) in the form 'host1,host2,...'")
+ Main.Flags().IntVar(&port, "port", port, "VTGate port")
+ Main.Flags().StringVar(&unixSocket, "unix_socket", unixSocket, "VTGate unix socket")
+ Main.Flags().StringVar(&protocol, "protocol", protocol, "Client protocol, either mysql (default), grpc-vtgate, or grpc-vttablet")
+ Main.Flags().StringVar(&user, "user", user, "Username to connect using mysql (password comes from the db-credentials-file)")
+ Main.Flags().StringVar(&db, "db", db, "Database name to use when connecting / running the queries (e.g. @replica, keyspace, keyspace/shard etc)")
+
+ Main.Flags().DurationVar(&deadline, "deadline", deadline, "Maximum duration for the test run (default 5 minutes)")
+ Main.Flags().StringVar(&sql, "sql", sql, "SQL statement to execute")
+ Main.Flags().IntVar(&threads, "threads", threads, "Number of parallel threads to run")
+ Main.Flags().IntVar(&count, "count", count, "Number of queries per thread")
+
+ Main.MarkFlagRequired("sql")
+
+ grpccommon.RegisterFlags(Main.Flags())
+ acl.RegisterFlags(Main.Flags())
+ servenv.RegisterMySQLServerFlags(Main.Flags())
+}
+
+func run(cmd *cobra.Command, args []string) error {
+ logger := logutil.NewConsoleLogger()
+ cmd.SetOutput(logutil.NewLoggerWriter(logger))
+ _ = cmd.Flags().Set("logtostderr", "true")
+
+ servenv.Init()
+
+ var clientProto vtbench.ClientProtocol
+ switch protocol {
+ case "", "mysql":
+ clientProto = vtbench.MySQL
+ case "grpc-vtgate":
+ clientProto = vtbench.GRPCVtgate
+ case "grpc-vttablet":
+ clientProto = vtbench.GRPCVttablet
+ default:
+ return fmt.Errorf("invalid client protocol %s", protocol)
+ }
+
+ if (host != "" || port != 0) && unixSocket != "" {
+ return errors.New("can't specify both host:port and unix_socket")
+ }
+
+ if host != "" && port == 0 {
+ return errors.New("must specify port when using host")
+ }
+
+ if host == "" && port != 0 {
+ return errors.New("must specify host when using port")
+ }
+
+ if host == "" && port == 0 && unixSocket == "" {
+ return errors.New("vtbench requires either host/port or unix_socket")
+ }
+
+ var password string
+ if clientProto == vtbench.MySQL {
+ var err error
+ _, password, err = dbconfigs.GetCredentialsServer().GetUserAndPassword(user)
+ if err != nil {
+ return fmt.Errorf("error reading password for user %v from file: %w", user, err)
+ }
+ }
+
+ connParams := vtbench.ConnParams{
+ Hosts: strings.Split(host, ","),
+ Port: port,
+ UnixSocket: unixSocket,
+ Protocol: clientProto,
+ DB: db,
+ Username: user,
+ Password: password,
+ }
+
+ b := vtbench.NewBench(threads, count, connParams, sql)
+
+ ctx, cancel := context.WithTimeout(context.Background(), deadline)
+ defer cancel()
+
+ fmt.Printf("Initializing test with %s protocol / %d threads / %d iterations\n",
+ b.ConnParams.Protocol.String(), b.Threads, b.Count)
+ err := b.Run(ctx)
+ if err != nil {
+ return fmt.Errorf("error in test: %w", err)
+ }
+
+ fmt.Printf("Average Rows Returned: %d\n", b.Rows.Get()/int64(b.Threads*b.Count))
+ fmt.Printf("Average Query Time: %v\n", time.Duration(b.Timings.Time()/b.Timings.Count()))
+ fmt.Printf("Total Test Time: %v\n", b.TotalTime)
+ fmt.Printf("QPS (Per Thread): %v\n", float64(b.Count)/b.TotalTime.Seconds())
+ fmt.Printf("QPS (Total): %v\n", float64(b.Count*b.Threads)/b.TotalTime.Seconds())
+
+ last := int64(0)
+
+ histograms := b.Timings.Histograms()
+ h := histograms["query"]
+ buckets := h.Buckets()
+ fmt.Printf("Query Timings:\n")
+ for i, bucket := range h.Cutoffs() {
+ count := buckets[i]
+ if count != 0 {
+ fmt.Printf("%v-%v: %v\n", time.Duration(last), time.Duration(bucket), count)
+ }
+ last = bucket
+ }
+
+ return nil
+}
diff --git a/go/cmd/vtbench/docgen/main.go b/go/cmd/vtbench/docgen/main.go
new file mode 100644
index 00000000000..5efe9e899a8
--- /dev/null
+++ b/go/cmd/vtbench/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/vtbench/cli"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+ Use: "docgen [-d <dir>]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(cli.Main, dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/vtbench/vtbench.go b/go/cmd/vtbench/vtbench.go
index 19044aae4ed..0d8bb85b536 100644
--- a/go/cmd/vtbench/vtbench.go
+++ b/go/cmd/vtbench/vtbench.go
@@ -17,194 +17,15 @@ limitations under the License.
package main
import (
- "context"
- "fmt"
- "strings"
- "time"
-
- "github.com/spf13/pflag"
-
- "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/cmd/vtbench/cli"
"vitess.io/vitess/go/exit"
- "vitess.io/vitess/go/vt/dbconfigs"
- "vitess.io/vitess/go/vt/grpccommon"
"vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/logutil"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vtbench"
-
- // Import and register the gRPC vtgateconn client
- _ "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn"
- // Import and register the gRPC tabletconn client
- _ "vitess.io/vitess/go/vt/vttablet/grpctabletconn"
-)
-
-/*
-
- Vtbench is a simple load testing client to compare workloads in
- Vitess across the various client/server protocols.
-
- There are a number of command line options to control the behavior,
- but as a basic example, the three supported client protocols are:
-
- Mysql protocol to vtgate:
- vtbench \
- --protocol mysql \
- --host vtgate-host.my.domain \
- --port 15306 \
- --user db_username \
- --db-credentials-file ./vtbench_db_creds.json \
- --db @replica \
- --sql "select * from loadtest_table where id=123456789" \
- --threads 10 \
- --count 10
-
- GRPC to vtgate:
- vtbench \
- --protocol grpc-vtgate \
- --host vtgate-host.my.domain \
- --port 15999 \
- --db @replica \
- $VTTABLET_GRPC_ARGS \
- --sql "select * from loadtest_table where id=123456789" \
- --threads 10 \
- --count 10
-
- GRPC to vttablet:
- vtbench \
- --protocol grpc-vttablet \
- --host tablet-loadtest-00-80.my.domain \
- --port 15999 \
- --db loadtest/00-80@replica \
- --sql "select * from loadtest_table where id=123456789" \
- --threads 10 \
- --count 10
-
-*/
-
-var (
- host, unixSocket, user, db, sql string
- port int
- protocol = "mysql"
- deadline = 5 * time.Minute
- threads = 2
- count = 1000
)
-func initFlags(fs *pflag.FlagSet) {
- fs.StringVar(&host, "host", host, "VTGate host(s) in the form 'host1,host2,...'")
- fs.IntVar(&port, "port", port, "VTGate port")
- fs.StringVar(&unixSocket, "unix_socket", unixSocket, "VTGate unix socket")
- fs.StringVar(&protocol, "protocol", protocol, "Client protocol, either mysql (default), grpc-vtgate, or grpc-vttablet")
- fs.StringVar(&user, "user", user, "Username to connect using mysql (password comes from the db-credentials-file)")
- fs.StringVar(&db, "db", db, "Database name to use when connecting / running the queries (e.g. @replica, keyspace, keyspace/shard etc)")
-
- fs.DurationVar(&deadline, "deadline", deadline, "Maximum duration for the test run (default 5 minutes)")
- fs.StringVar(&sql, "sql", sql, "SQL statement to execute")
- fs.IntVar(&threads, "threads", threads, "Number of parallel threads to run")
- fs.IntVar(&count, "count", count, "Number of queries per thread")
-
- grpccommon.RegisterFlags(fs)
- log.RegisterFlags(fs)
- logutil.RegisterFlags(fs)
- acl.RegisterFlags(fs)
- servenv.RegisterMySQLServerFlags(fs)
-}
-
func main() {
- servenv.OnParseFor("vtbench", func(fs *pflag.FlagSet) {
- logger := logutil.NewConsoleLogger()
- fs.SetOutput(logutil.NewLoggerWriter(logger))
-
- initFlags(fs)
- _ = fs.Set("logtostderr", "true")
- })
-
- servenv.ParseFlags("vtbench")
- servenv.Init()
-
defer exit.Recover()
- clientProto := vtbench.MySQL
- switch protocol {
- case "", "mysql":
- clientProto = vtbench.MySQL
- case "grpc-vtgate":
- clientProto = vtbench.GRPCVtgate
- case "grpc-vttablet":
- clientProto = vtbench.GRPCVttablet
- default:
- log.Exitf("invalid client protocol %s", protocol)
- }
-
- if (host != "" || port != 0) && unixSocket != "" {
- log.Exitf("can't specify both host:port and unix_socket")
- }
-
- if host != "" && port == 0 {
- log.Exitf("must specify port when using host")
- }
-
- if host == "" && port != 0 {
- log.Exitf("must specify host when using port")
- }
-
- if host == "" && port == 0 && unixSocket == "" {
- log.Exitf("vtbench requires either host/port or unix_socket")
- }
-
- if sql == "" {
- log.Exitf("must specify sql")
- }
-
- var password string
- if clientProto == vtbench.MySQL {
- var err error
- _, password, err = dbconfigs.GetCredentialsServer().GetUserAndPassword(user)
- if err != nil {
- log.Exitf("error reading password for user %v from file: %v", user, err)
- }
- }
-
- connParams := vtbench.ConnParams{
- Hosts: strings.Split(host, ","),
- Port: port,
- UnixSocket: unixSocket,
- Protocol: clientProto,
- DB: db,
- Username: user,
- Password: password,
- }
-
- b := vtbench.NewBench(threads, count, connParams, sql)
-
- ctx, cancel := context.WithTimeout(context.Background(), deadline)
- defer cancel()
-
- fmt.Printf("Initializing test with %s protocol / %d threads / %d iterations\n",
- b.ConnParams.Protocol.String(), b.Threads, b.Count)
- err := b.Run(ctx)
- if err != nil {
- log.Exitf("error in test: %v", err)
- }
-
- fmt.Printf("Average Rows Returned: %d\n", b.Rows.Get()/int64(b.Threads*b.Count))
- fmt.Printf("Average Query Time: %v\n", time.Duration(b.Timings.Time()/b.Timings.Count()))
- fmt.Printf("Total Test Time: %v\n", b.TotalTime)
- fmt.Printf("QPS (Per Thread): %v\n", float64(b.Count)/b.TotalTime.Seconds())
- fmt.Printf("QPS (Total): %v\n", float64(b.Count*b.Threads)/b.TotalTime.Seconds())
-
- last := int64(0)
-
- histograms := b.Timings.Histograms()
- h := histograms["query"]
- buckets := h.Buckets()
- fmt.Printf("Query Timings:\n")
- for i, bucket := range h.Cutoffs() {
- count := buckets[i]
- if count != 0 {
- fmt.Printf("%v-%v: %v\n", time.Duration(last), time.Duration(bucket), count)
- }
- last = bucket
+ if err := cli.Main.Execute(); err != nil {
+ log.Exit(err)
}
}
diff --git a/go/cmd/vtclient/plugin_opentracing.go b/go/cmd/vtclient/cli/plugin_opentracing.go
similarity index 98%
rename from go/cmd/vtclient/plugin_opentracing.go
rename to go/cmd/vtclient/cli/plugin_opentracing.go
index b48334531a3..a3466ca8c73 100644
--- a/go/cmd/vtclient/plugin_opentracing.go
+++ b/go/cmd/vtclient/cli/plugin_opentracing.go
@@ -14,11 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
"vitess.io/vitess/go/trace"
-
"vitess.io/vitess/go/vt/servenv"
)
diff --git a/go/cmd/vtclient/cli/vtclient.go b/go/cmd/vtclient/cli/vtclient.go
new file mode 100644
index 00000000000..949af851ab4
--- /dev/null
+++ b/go/cmd/vtclient/cli/vtclient.go
@@ -0,0 +1,431 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import (
+ "context"
+ "database/sql"
+ "encoding/json"
+ "fmt"
+ "io"
+ "math/rand"
+ "os"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/olekukonko/tablewriter"
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+
+ "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/vt/concurrency"
+ "vitess.io/vitess/go/vt/grpccommon"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/vitessdriver"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate/vtgateconn"
+
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ // Include deprecation warnings for soon-to-be-unsupported flag invocations.
+)
+
+var (
+ server string
+ streaming bool
+ targetString string
+ jsonOutput bool
+ useRandom bool
+ bindVariables *bindvars
+
+ timeout = 30 * time.Second
+ parallel = 1
+ count = 1
+ minSeqID int
+ maxSeqID int
+ qps int
+
+ Main = &cobra.Command{
+ Use: "vtclient <query>",
+ Short: "vtclient connects to a vtgate server using the standard go driver API.",
+ Long: `vtclient connects to a vtgate server using the standard go driver API.
+
+For query bound variables, we assume place-holders in the query string
+in the form of :v1, :v2, etc.`,
+ Example: `vtclient --server vtgate:15991 "SELECT * FROM messages"
+
+vtclient --server vtgate:15991 --target '@primary' --bind_variables '[ 12345, 1, "msg 12345" ]' "INSERT INTO messages (page,time_created_ns,message) VALUES (:v1, :v2, :v3)"`,
+ Args: cobra.ExactArgs(1),
+ Version: servenv.AppVersion.String(),
+ RunE: run,
+ }
+)
+
+var (
+ seqChan = make(chan int, 10)
+)
+
+func init() {
+ servenv.MoveFlagsToCobraCommand(Main)
+
+ Main.Flags().StringVar(&server, "server", server, "vtgate server to connect to")
+ Main.Flags().DurationVar(&timeout, "timeout", timeout, "timeout for queries")
+ Main.Flags().BoolVar(&streaming, "streaming", streaming, "use a streaming query")
+ Main.Flags().StringVar(&targetString, "target", targetString, "keyspace:shard@tablet_type")
+ Main.Flags().BoolVar(&jsonOutput, "json", jsonOutput, "Output JSON instead of human-readable table")
+ Main.Flags().IntVar(¶llel, "parallel", parallel, "DMLs only: Number of threads executing the same query in parallel. Useful for simple load testing.")
+ Main.Flags().IntVar(&count, "count", count, "DMLs only: Number of times each thread executes the query. Useful for simple, sustained load testing.")
+ Main.Flags().IntVar(&minSeqID, "min_sequence_id", minSeqID, "min sequence ID to generate. When max_sequence_id > min_sequence_id, for each query, a number is generated in [min_sequence_id, max_sequence_id) and attached to the end of the bind variables.")
+ Main.Flags().IntVar(&maxSeqID, "max_sequence_id", maxSeqID, "max sequence ID.")
+ Main.Flags().BoolVar(&useRandom, "use_random_sequence", useRandom, "use random sequence for generating [min_sequence_id, max_sequence_id)")
+ Main.Flags().IntVar(&qps, "qps", qps, "queries per second to throttle each thread at.")
+
+ acl.RegisterFlags(Main.Flags())
+ grpccommon.RegisterFlags(Main.Flags())
+ servenv.RegisterMySQLServerFlags(Main.Flags())
+
+ bindVariables = newBindvars(Main.Flags(), "bind_variables", "bind variables as a json list")
+}
+
+type bindvars []any
+
+func (bv *bindvars) String() string {
+ b, err := json.Marshal(bv)
+ if err != nil {
+ return err.Error()
+ }
+ return string(b)
+}
+
+func (bv *bindvars) Set(s string) (err error) {
+ err = json.Unmarshal([]byte(s), &bv)
+ if err != nil {
+ return err
+ }
+ // json reads all numbers as float64
+ // So, we just ditch floats for bindvars
+ for i, v := range *bv {
+ if f, ok := v.(float64); ok {
+ if f > 0 {
+ (*bv)[i] = uint64(f)
+ } else {
+ (*bv)[i] = int64(f)
+ }
+ }
+ }
+
+ return nil
+}
+
+// For internal flag compatibility
+func (bv *bindvars) Get() any {
+ return bv
+}
+
+// Type is part of the pflag.Value interface. bindvars.Set() expects all numbers as float64.
+func (bv *bindvars) Type() string {
+ return "float64"
+}
+
+func newBindvars(fs *pflag.FlagSet, name, usage string) *bindvars {
+ var bv bindvars
+ fs.Var(&bv, name, usage)
+ return &bv
+}
+
+func run(cmd *cobra.Command, args []string) error {
+ defer logutil.Flush()
+
+ qr, err := _run(cmd, args)
+ if jsonOutput && qr != nil {
+ data, err := json.MarshalIndent(qr, "", " ")
+ if err != nil {
+ return fmt.Errorf("cannot marshal data: %w", err)
+ }
+ fmt.Fprint(cmd.OutOrStdout(), string(data))
+ return nil
+ }
+
+ qr.print(cmd.OutOrStdout())
+ return err
+}
+
+func _run(cmd *cobra.Command, args []string) (*results, error) {
+ logutil.PurgeLogs()
+
+ if maxSeqID > minSeqID {
+ go func() {
+ if useRandom {
+ for {
+ seqChan <- rand.Intn(maxSeqID-minSeqID) + minSeqID
+ }
+ } else {
+ for i := minSeqID; i < maxSeqID; i++ {
+ seqChan <- i
+ }
+ }
+ }()
+ }
+
+ c := vitessdriver.Configuration{
+ Protocol: vtgateconn.GetVTGateProtocol(),
+ Address: server,
+ Target: targetString,
+ Streaming: streaming,
+ }
+ db, err := vitessdriver.OpenWithConfiguration(c)
+ if err != nil {
+ return nil, fmt.Errorf("client error: %w", err)
+ }
+
+ log.Infof("Sending the query...")
+
+ ctx, cancel := context.WithTimeout(context.Background(), timeout)
+ defer cancel()
+ return execMulti(ctx, db, cmd.Flags().Arg(0))
+}
+
+func prepareBindVariables() []any {
+ bv := make([]any, 0, len(*bindVariables)+1)
+ bv = append(bv, (*bindVariables)...)
+ if maxSeqID > minSeqID {
+ bv = append(bv, <-seqChan)
+ }
+ return bv
+}
+
+func execMulti(ctx context.Context, db *sql.DB, sql string) (*results, error) {
+ all := newResults()
+ ec := concurrency.FirstErrorRecorder{}
+ wg := sync.WaitGroup{}
+ isDML := sqlparser.IsDML(sql)
+
+ isThrottled := qps > 0
+
+ start := time.Now()
+ for i := 0; i < parallel; i++ {
+ wg.Add(1)
+
+ go func() {
+ defer wg.Done()
+
+ var ticker *time.Ticker
+ if isThrottled {
+ tickDuration := time.Second / time.Duration(qps)
+ ticker = time.NewTicker(tickDuration)
+ }
+
+ for j := 0; j < count; j++ {
+ var qr *results
+ var err error
+ if isDML {
+ qr, err = execDml(ctx, db, sql)
+ } else {
+ qr, err = execNonDml(ctx, db, sql)
+ }
+ if count == 1 && parallel == 1 {
+ all = qr
+ } else {
+ all.merge(qr)
+ if err != nil {
+ all.recordError(err)
+ }
+ }
+ if err != nil {
+ ec.RecordError(err)
+ // We keep going and do not return early on purpose.
+ }
+
+ if ticker != nil {
+ <-ticker.C
+ }
+ }
+ }()
+ }
+ wg.Wait()
+ if all != nil {
+ all.duration = time.Since(start)
+ }
+
+ return all, ec.Error()
+}
+
+func execDml(ctx context.Context, db *sql.DB, sql string) (*results, error) {
+ start := time.Now()
+ tx, err := db.Begin()
+ if err != nil {
+ return nil, vterrors.Wrap(err, "BEGIN failed")
+ }
+
+ result, err := tx.ExecContext(ctx, sql, []any(prepareBindVariables())...)
+ if err != nil {
+ return nil, vterrors.Wrap(err, "failed to execute DML")
+ }
+
+ err = tx.Commit()
+ if err != nil {
+ return nil, vterrors.Wrap(err, "COMMIT failed")
+ }
+
+ rowsAffected, _ := result.RowsAffected()
+ lastInsertID, _ := result.LastInsertId()
+ return &results{
+ rowsAffected: rowsAffected,
+ lastInsertID: lastInsertID,
+ duration: time.Since(start),
+ }, nil
+}
+
+func execNonDml(ctx context.Context, db *sql.DB, sql string) (*results, error) {
+ start := time.Now()
+ rows, err := db.QueryContext(ctx, sql, []any(prepareBindVariables())...)
+ if err != nil {
+ return nil, vterrors.Wrap(err, "client error")
+ }
+ defer rows.Close()
+
+ // get the headers
+ var qr results
+ cols, err := rows.Columns()
+ if err != nil {
+ return nil, vterrors.Wrap(err, "client error")
+ }
+ qr.Fields = cols
+
+ // get the rows
+ for rows.Next() {
+ row := make([]any, len(cols))
+ for i := range row {
+ var col string
+ row[i] = &col
+ }
+ if err := rows.Scan(row...); err != nil {
+ return nil, vterrors.Wrap(err, "client error")
+ }
+
+ // unpack []*string into []string
+ vals := make([]string, 0, len(row))
+ for _, value := range row {
+ vals = append(vals, *(value.(*string)))
+ }
+ qr.Rows = append(qr.Rows, vals)
+ }
+ qr.rowsAffected = int64(len(qr.Rows))
+
+ if err := rows.Err(); err != nil {
+ return nil, vterrors.Wrap(err, "Vitess returned an error")
+ }
+
+ qr.duration = time.Since(start)
+ return &qr, nil
+}
+
+type results struct {
+ mu sync.Mutex
+ Fields []string `json:"fields"`
+ Rows [][]string `json:"rows"`
+ rowsAffected int64
+ lastInsertID int64
+ duration time.Duration
+ cumulativeDuration time.Duration
+
+ // Multi DML mode: Track total error count, error count per code and the first error.
+ totalErrorCount int
+ errorCount map[vtrpcpb.Code]int
+ firstError map[vtrpcpb.Code]error
+}
+
+func newResults() *results {
+ return &results{
+ errorCount: make(map[vtrpcpb.Code]int),
+ firstError: make(map[vtrpcpb.Code]error),
+ }
+}
+
+// merge aggregates "other" into "r".
+// This is only used for executing DMLs concurrently and repeatedly.
+// Therefore, "Fields" and "Rows" are not merged.
+func (r *results) merge(other *results) {
+ if other == nil {
+ return
+ }
+
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ r.rowsAffected += other.rowsAffected
+ if other.lastInsertID > r.lastInsertID {
+ r.lastInsertID = other.lastInsertID
+ }
+ r.cumulativeDuration += other.duration
+}
+
+func (r *results) recordError(err error) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ r.totalErrorCount++
+ code := vterrors.Code(err)
+ r.errorCount[code]++
+
+ if r.errorCount[code] == 1 {
+ r.firstError[code] = err
+ }
+}
+
+func (r *results) print(w io.Writer) {
+ if r == nil {
+ return
+ }
+
+ table := tablewriter.NewWriter(os.Stdout)
+ table.SetHeader(r.Fields)
+ table.SetAutoFormatHeaders(false)
+ table.AppendBulk(r.Rows)
+ table.Render()
+ fmt.Fprintf(w, "%v row(s) affected (%v, cum: %v)\n", r.rowsAffected, r.duration, r.cumulativeDuration)
+ if r.lastInsertID != 0 {
+ fmt.Fprintf(w, "Last insert ID: %v\n", r.lastInsertID)
+ }
+
+ if r.totalErrorCount == 0 {
+ return
+ }
+
+ fmt.Printf("%d error(s) were returned. Number of errors by error code:\n\n", r.totalErrorCount)
+ // Sort different error codes by count (descending).
+ type errorCounts struct {
+ code vtrpcpb.Code
+ count int
+ }
+ var counts []errorCounts
+ for code, count := range r.errorCount {
+ counts = append(counts, errorCounts{code, count})
+ }
+ sort.Slice(counts, func(i, j int) bool { return counts[i].count >= counts[j].count })
+ for _, c := range counts {
+ fmt.Fprintf(w, "%- 30v= % 5d\n", c.code, c.count)
+ }
+
+ fmt.Fprintf(w, "\nFirst error per code:\n\n")
+ for code, err := range r.firstError {
+ fmt.Fprintf(w, "Code: %v\nError: %v\n\n", code, err)
+ }
+}
diff --git a/go/cmd/vtclient/vtclient_test.go b/go/cmd/vtclient/cli/vtclient_test.go
similarity index 90%
rename from go/cmd/vtclient/vtclient_test.go
rename to go/cmd/vtclient/cli/vtclient_test.go
index 4711b1e0127..a5ee571cd0b 100644
--- a/go/cmd/vtclient/vtclient_test.go
+++ b/go/cmd/vtclient/cli/vtclient_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
"fmt"
@@ -22,7 +22,7 @@ import (
"strings"
"testing"
- "github.com/spf13/pflag"
+ "github.com/stretchr/testify/require"
"vitess.io/vitess/go/vt/vttest"
@@ -120,15 +120,16 @@ func TestVtclient(t *testing.T) {
},
}
- // Change ErrorHandling from ExitOnError to panicking.
- pflag.CommandLine.Init("vtclient_test.go", pflag.PanicOnError)
for _, q := range queries {
// Run main function directly and not as external process. To achieve this,
// overwrite os.Args which is used by pflag.Parse().
- os.Args = []string{"vtclient_test.go", "--server", vtgateAddr}
- os.Args = append(os.Args, q.args...)
+ args := []string{"--server", vtgateAddr}
+ args = append(args, q.args...)
- results, err := run()
+ err := Main.ParseFlags(args)
+ require.NoError(t, err)
+
+ results, err := _run(Main, args)
if q.errMsg != "" {
if got, want := err.Error(), q.errMsg; !strings.Contains(got, want) {
t.Fatalf("vtclient %v returned wrong error: got = %v, want contains = %v", os.Args[1:], got, want)
@@ -137,7 +138,7 @@ func TestVtclient(t *testing.T) {
}
if err != nil {
- t.Fatalf("vtclient %v failed: %v", os.Args[1:], err)
+ t.Fatalf("vtclient %v failed: %v", args[1:], err)
}
if got, want := results.rowsAffected, q.rowsAffected; got != want {
t.Fatalf("wrong rows affected for query: %v got = %v, want = %v", os.Args[1:], got, want)
diff --git a/go/cmd/vtclient/docgen/main.go b/go/cmd/vtclient/docgen/main.go
new file mode 100644
index 00000000000..b740cbd67a7
--- /dev/null
+++ b/go/cmd/vtclient/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/vtclient/cli"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+		Use:  "docgen [-d <dir>]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(cli.Main, dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/vtclient/vtclient.go b/go/cmd/vtclient/vtclient.go
index adc060d7737..4201d25c882 100644
--- a/go/cmd/vtclient/vtclient.go
+++ b/go/cmd/vtclient/vtclient.go
@@ -17,440 +17,12 @@ limitations under the License.
package main
import (
- "context"
- "database/sql"
- "encoding/json"
- "errors"
- "flag"
- "fmt"
- "io"
- "math/rand"
- "os"
- "sort"
- "sync"
- "time"
-
- "github.com/olekukonko/tablewriter"
- "github.com/spf13/pflag"
-
- "vitess.io/vitess/go/acl"
- "vitess.io/vitess/go/vt/concurrency"
- "vitess.io/vitess/go/vt/grpccommon"
+ "vitess.io/vitess/go/cmd/vtclient/cli"
"vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/logutil"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/sqlparser"
- "vitess.io/vitess/go/vt/vitessdriver"
- "vitess.io/vitess/go/vt/vterrors"
- "vitess.io/vitess/go/vt/vtgate/vtgateconn"
-
- vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
-
- // Include deprecation warnings for soon-to-be-unsupported flag invocations.
- _flag "vitess.io/vitess/go/internal/flag"
)
-var (
- usage = `
-vtclient connects to a vtgate server using the standard go driver API.
-Version 3 of the API is used, we do not send any hint to the server.
-
-For query bound variables, we assume place-holders in the query string
-in the form of :v1, :v2, etc.
-
-Examples:
-
- $ vtclient --server vtgate:15991 "SELECT * FROM messages"
-
- $ vtclient --server vtgate:15991 --target '@primary' --bind_variables '[ 12345, 1, "msg 12345" ]' "INSERT INTO messages (page,time_created_ns,message) VALUES (:v1, :v2, :v3)"
-
-`
- server string
- streaming bool
- targetString string
- jsonOutput bool
- useRandom bool
- bindVariables *bindvars
-
- timeout = 30 * time.Second
- parallel = 1
- count = 1
- minSeqID = 0
- maxSeqID = 0
- qps = 0
-)
-
-var (
- seqChan = make(chan int, 10)
-)
-
-func init() {
- _flag.SetUsage(flag.CommandLine, _flag.UsageOptions{
- Epilogue: func(w io.Writer) { fmt.Fprint(w, usage) },
- })
-}
-
-func registerFlags(fs *pflag.FlagSet) {
- fs.StringVar(&server, "server", server, "vtgate server to connect to")
- fs.DurationVar(&timeout, "timeout", timeout, "timeout for queries")
- fs.BoolVar(&streaming, "streaming", streaming, "use a streaming query")
- fs.StringVar(&targetString, "target", targetString, "keyspace:shard@tablet_type")
- fs.BoolVar(&jsonOutput, "json", jsonOutput, "Output JSON instead of human-readable table")
- fs.IntVar(¶llel, "parallel", parallel, "DMLs only: Number of threads executing the same query in parallel. Useful for simple load testing.")
- fs.IntVar(&count, "count", count, "DMLs only: Number of times each thread executes the query. Useful for simple, sustained load testing.")
- fs.IntVar(&minSeqID, "min_sequence_id", minSeqID, "min sequence ID to generate. When max_sequence_id > min_sequence_id, for each query, a number is generated in [min_sequence_id, max_sequence_id) and attached to the end of the bind variables.")
- fs.IntVar(&maxSeqID, "max_sequence_id", maxSeqID, "max sequence ID.")
- fs.BoolVar(&useRandom, "use_random_sequence", useRandom, "use random sequence for generating [min_sequence_id, max_sequence_id)")
- fs.IntVar(&qps, "qps", qps, "queries per second to throttle each thread at.")
-
- acl.RegisterFlags(fs)
-
- bindVariables = newBindvars(fs, "bind_variables", "bind variables as a json list")
-}
-
-type bindvars []any
-
-func (bv *bindvars) String() string {
- b, err := json.Marshal(bv)
- if err != nil {
- return err.Error()
- }
- return string(b)
-}
-
-func (bv *bindvars) Set(s string) (err error) {
- err = json.Unmarshal([]byte(s), &bv)
- if err != nil {
- return err
- }
- // json reads all numbers as float64
- // So, we just ditch floats for bindvars
- for i, v := range *bv {
- if f, ok := v.(float64); ok {
- if f > 0 {
- (*bv)[i] = uint64(f)
- } else {
- (*bv)[i] = int64(f)
- }
- }
- }
-
- return nil
-}
-
-// For internal flag compatibility
-func (bv *bindvars) Get() any {
- return bv
-}
-
-// Type is part of the pflag.Value interface. bindvars.Set() expects all numbers as float64.
-func (bv *bindvars) Type() string {
- return "float64"
-}
-
-func newBindvars(fs *pflag.FlagSet, name, usage string) *bindvars {
- var bv bindvars
- fs.Var(&bv, name, usage)
- return &bv
-}
-
func main() {
- defer logutil.Flush()
-
- qr, err := run()
- if jsonOutput && qr != nil {
- data, err := json.MarshalIndent(qr, "", " ")
- if err != nil {
- log.Exitf("cannot marshal data: %v", err)
- }
- fmt.Print(string(data))
- return
- }
-
- qr.print()
-
- if err != nil {
+ if err := cli.Main.Execute(); err != nil {
log.Exit(err)
}
}
-
-func run() (*results, error) {
- fs := pflag.NewFlagSet("vtclient", pflag.ExitOnError)
- grpccommon.RegisterFlags(fs)
- log.RegisterFlags(fs)
- logutil.RegisterFlags(fs)
- servenv.RegisterMySQLServerFlags(fs)
- registerFlags(fs)
- _flag.Parse(fs)
- args := _flag.Args()
-
- logutil.PurgeLogs()
-
- if len(args) == 0 {
- pflag.Usage()
- return nil, errors.New("no arguments provided. See usage above")
- }
- if len(args) > 1 {
- return nil, errors.New("no additional arguments after the query allowed")
- }
-
- if maxSeqID > minSeqID {
- go func() {
- if useRandom {
- rand.Seed(time.Now().UnixNano())
- for {
- seqChan <- rand.Intn(maxSeqID-minSeqID) + minSeqID
- }
- } else {
- for i := minSeqID; i < maxSeqID; i++ {
- seqChan <- i
- }
- }
- }()
- }
-
- c := vitessdriver.Configuration{
- Protocol: vtgateconn.GetVTGateProtocol(),
- Address: server,
- Target: targetString,
- Streaming: streaming,
- }
- db, err := vitessdriver.OpenWithConfiguration(c)
- if err != nil {
- return nil, fmt.Errorf("client error: %v", err)
- }
-
- log.Infof("Sending the query...")
-
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
- defer cancel()
- return execMulti(ctx, db, args[0])
-}
-
-func prepareBindVariables() []any {
- bv := make([]any, 0, len(*bindVariables)+1)
- bv = append(bv, (*bindVariables)...)
- if maxSeqID > minSeqID {
- bv = append(bv, <-seqChan)
- }
- return bv
-}
-
-func execMulti(ctx context.Context, db *sql.DB, sql string) (*results, error) {
- all := newResults()
- ec := concurrency.FirstErrorRecorder{}
- wg := sync.WaitGroup{}
- isDML := sqlparser.IsDML(sql)
-
- isThrottled := qps > 0
-
- start := time.Now()
- for i := 0; i < parallel; i++ {
- wg.Add(1)
-
- go func() {
- defer wg.Done()
-
- var ticker *time.Ticker
- if isThrottled {
- tickDuration := time.Second / time.Duration(qps)
- ticker = time.NewTicker(tickDuration)
- }
-
- for j := 0; j < count; j++ {
- var qr *results
- var err error
- if isDML {
- qr, err = execDml(ctx, db, sql)
- } else {
- qr, err = execNonDml(ctx, db, sql)
- }
- if count == 1 && parallel == 1 {
- all = qr
- } else {
- all.merge(qr)
- if err != nil {
- all.recordError(err)
- }
- }
- if err != nil {
- ec.RecordError(err)
- // We keep going and do not return early purpose.
- }
-
- if ticker != nil {
- <-ticker.C
- }
- }
- }()
- }
- wg.Wait()
- if all != nil {
- all.duration = time.Since(start)
- }
-
- return all, ec.Error()
-}
-
-func execDml(ctx context.Context, db *sql.DB, sql string) (*results, error) {
- start := time.Now()
- tx, err := db.Begin()
- if err != nil {
- return nil, vterrors.Wrap(err, "BEGIN failed")
- }
-
- result, err := tx.ExecContext(ctx, sql, []any(prepareBindVariables())...)
- if err != nil {
- return nil, vterrors.Wrap(err, "failed to execute DML")
- }
-
- err = tx.Commit()
- if err != nil {
- return nil, vterrors.Wrap(err, "COMMIT failed")
- }
-
- rowsAffected, _ := result.RowsAffected()
- lastInsertID, _ := result.LastInsertId()
- return &results{
- rowsAffected: rowsAffected,
- lastInsertID: lastInsertID,
- duration: time.Since(start),
- }, nil
-}
-
-func execNonDml(ctx context.Context, db *sql.DB, sql string) (*results, error) {
- start := time.Now()
- rows, err := db.QueryContext(ctx, sql, []any(prepareBindVariables())...)
- if err != nil {
- return nil, vterrors.Wrap(err, "client error")
- }
- defer rows.Close()
-
- // get the headers
- var qr results
- cols, err := rows.Columns()
- if err != nil {
- return nil, vterrors.Wrap(err, "client error")
- }
- qr.Fields = cols
-
- // get the rows
- for rows.Next() {
- row := make([]any, len(cols))
- for i := range row {
- var col string
- row[i] = &col
- }
- if err := rows.Scan(row...); err != nil {
- return nil, vterrors.Wrap(err, "client error")
- }
-
- // unpack []*string into []string
- vals := make([]string, 0, len(row))
- for _, value := range row {
- vals = append(vals, *(value.(*string)))
- }
- qr.Rows = append(qr.Rows, vals)
- }
- qr.rowsAffected = int64(len(qr.Rows))
-
- if err := rows.Err(); err != nil {
- return nil, vterrors.Wrap(err, "Vitess returned an error")
- }
-
- qr.duration = time.Since(start)
- return &qr, nil
-}
-
-type results struct {
- mu sync.Mutex
- Fields []string `json:"fields"`
- Rows [][]string `json:"rows"`
- rowsAffected int64
- lastInsertID int64
- duration time.Duration
- cumulativeDuration time.Duration
-
- // Multi DML mode: Track total error count, error count per code and the first error.
- totalErrorCount int
- errorCount map[vtrpcpb.Code]int
- firstError map[vtrpcpb.Code]error
-}
-
-func newResults() *results {
- return &results{
- errorCount: make(map[vtrpcpb.Code]int),
- firstError: make(map[vtrpcpb.Code]error),
- }
-}
-
-// merge aggregates "other" into "r".
-// This is only used for executing DMLs concurrently and repeatedly.
-// Therefore, "Fields" and "Rows" are not merged.
-func (r *results) merge(other *results) {
- if other == nil {
- return
- }
-
- r.mu.Lock()
- defer r.mu.Unlock()
-
- r.rowsAffected += other.rowsAffected
- if other.lastInsertID > r.lastInsertID {
- r.lastInsertID = other.lastInsertID
- }
- r.cumulativeDuration += other.duration
-}
-
-func (r *results) recordError(err error) {
- r.mu.Lock()
- defer r.mu.Unlock()
-
- r.totalErrorCount++
- code := vterrors.Code(err)
- r.errorCount[code]++
-
- if r.errorCount[code] == 1 {
- r.firstError[code] = err
- }
-}
-
-func (r *results) print() {
- if r == nil {
- return
- }
-
- table := tablewriter.NewWriter(os.Stdout)
- table.SetHeader(r.Fields)
- table.SetAutoFormatHeaders(false)
- table.AppendBulk(r.Rows)
- table.Render()
- fmt.Printf("%v row(s) affected (%v, cum: %v)\n", r.rowsAffected, r.duration, r.cumulativeDuration)
- if r.lastInsertID != 0 {
- fmt.Printf("Last insert ID: %v\n", r.lastInsertID)
- }
-
- if r.totalErrorCount == 0 {
- return
- }
-
- fmt.Printf("%d error(s) were returned. Number of errors by error code:\n\n", r.totalErrorCount)
- // Sort different error codes by count (descending).
- type errorCounts struct {
- code vtrpcpb.Code
- count int
- }
- var counts []errorCounts
- for code, count := range r.errorCount {
- counts = append(counts, errorCounts{code, count})
- }
- sort.Slice(counts, func(i, j int) bool { return counts[i].count >= counts[j].count })
- for _, c := range counts {
- fmt.Printf("%- 30v= % 5d\n", c.code, c.count)
- }
-
- fmt.Printf("\nFirst error per code:\n\n")
- for code, err := range r.firstError {
- fmt.Printf("Code: %v\nError: %v\n\n", code, err)
- }
-}
diff --git a/go/cmd/vtcombo/cli/main.go b/go/cmd/vtcombo/cli/main.go
new file mode 100644
index 00000000000..bfc0ad894fe
--- /dev/null
+++ b/go/cmd/vtcombo/cli/main.go
@@ -0,0 +1,358 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// vtcombo: a single binary that contains:
+// - a ZK topology server based on an in-memory map.
+// - one vtgate instance.
+// - many vttablet instances.
+// - a vtctld instance so it's easy to see the topology.
+package cli
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/mysql/replication"
+ "vitess.io/vitess/go/vt/dbconfigs"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/mysqlctl"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/srvtopo"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/memorytopo"
+ "vitess.io/vitess/go/vt/topotools"
+ "vitess.io/vitess/go/vt/vtcombo"
+ "vitess.io/vitess/go/vt/vtctld"
+ "vitess.io/vitess/go/vt/vtgate"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+ "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
+ "vitess.io/vitess/go/vt/vttest"
+ "vitess.io/vitess/go/vt/wrangler"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vttestpb "vitess.io/vitess/go/vt/proto/vttest"
+)
+
+var (
+ Main = &cobra.Command{
+ Use: "vtcombo",
+ Short: "vtcombo is a single binary containing several vitess components.",
+ Long: `vtcombo is a single binary containing several vitess components.
+
+In particular, it contains:
+- A topology server based on an in-memory map.
+- One vtgate instance.
+- Many vttablet instances.
+- A vtctld instance so it's easy to see the topology.`,
+ Args: cobra.NoArgs,
+ Version: servenv.AppVersion.String(),
+ PreRunE: servenv.CobraPreRunE,
+ RunE: run,
+ }
+ schemaDir string
+ startMysql bool
+ mysqlPort = 3306
+ externalTopoServer bool
+ plannerName string
+ vschemaPersistenceDir string
+
+ tpb vttestpb.VTTestTopology
+ ts *topo.Server
+ resilientServer *srvtopo.ResilientServer
+)
+
+func init() {
+ servenv.RegisterDefaultFlags()
+ servenv.RegisterFlags()
+ servenv.RegisterGRPCServerFlags()
+ servenv.RegisterGRPCServerAuthFlags()
+ servenv.RegisterServiceMapFlag()
+
+ dbconfigs.RegisterFlags(dbconfigs.All...)
+ mysqlctl.RegisterFlags()
+
+ servenv.MoveFlagsToCobraCommand(Main)
+
+ acl.RegisterFlags(Main.Flags())
+
+ Main.Flags().StringVar(&schemaDir, "schema_dir", schemaDir, "Schema base directory. Should contain one directory per keyspace, with a vschema.json file if necessary.")
+ Main.Flags().BoolVar(&startMysql, "start_mysql", startMysql, "Should vtcombo also start mysql")
+ Main.Flags().IntVar(&mysqlPort, "mysql_port", mysqlPort, "mysql port")
+ Main.Flags().BoolVar(&externalTopoServer, "external_topo_server", externalTopoServer, "Should vtcombo use an external topology server instead of starting its own in-memory topology server. "+
+ "If true, vtcombo will use the flags defined in topo/server.go to open topo server")
+ Main.Flags().StringVar(&plannerName, "planner-version", plannerName, "Sets the default planner to use when the session has not changed it. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right")
+ Main.Flags().StringVar(&vschemaPersistenceDir, "vschema-persistence-dir", vschemaPersistenceDir, "If set, per-keyspace vschema will be persisted in this directory "+
+ "and reloaded into the in-memory topology server across restarts. Bookkeeping is performed using a simple watcher goroutine. "+
+ "This is useful when running vtcombo as an application development container (e.g. vttestserver) where you want to keep the same "+
+ "vschema even if developer's machine reboots. This works in tandem with vttestserver's --persistent_mode flag. Needless to say, "+
+ "this is neither a perfect nor a production solution for vschema persistence. Consider using the --external_topo_server flag if "+
+ "you require a more complete solution. This flag is ignored if --external_topo_server is set.")
+
+ Main.Flags().Var(vttest.TextTopoData(&tpb), "proto_topo", "vttest proto definition of the topology, encoded in compact text format. See vttest.proto for more information.")
+ Main.Flags().Var(vttest.JSONTopoData(&tpb), "json_topo", "vttest proto definition of the topology, encoded in json format. See vttest.proto for more information.")
+
+ // We're going to force the value later, so don't even bother letting the
+ // user know about this flag.
+ Main.Flags().MarkHidden("tablet_protocol")
+}
+
+func startMysqld(uid uint32) (mysqld *mysqlctl.Mysqld, cnf *mysqlctl.Mycnf, err error) {
+ ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+ defer cancel()
+
+ mycnfFile := mysqlctl.MycnfFile(uid)
+
+ if _, statErr := os.Stat(mycnfFile); os.IsNotExist(statErr) {
+ mysqld, cnf, err = mysqlctl.CreateMysqldAndMycnf(uid, "", mysqlPort)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to initialize mysql config :%w", err)
+ }
+ if err := mysqld.Init(ctx, cnf, ""); err != nil {
+ return nil, nil, fmt.Errorf("failed to initialize mysql :%w", err)
+ }
+ } else {
+ mysqld, cnf, err = mysqlctl.OpenMysqldAndMycnf(uid)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to find mysql config: %w", err)
+ }
+ err = mysqld.RefreshConfig(ctx, cnf)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to refresh config: %w", err)
+ }
+ if err := mysqld.Start(ctx, cnf); err != nil {
+ return nil, nil, fmt.Errorf("Failed to start mysqld: %w", err)
+ }
+ }
+
+ return mysqld, cnf, nil
+}
+
+func run(cmd *cobra.Command, args []string) (err error) {
+ // Stash away a copy of the topology that vtcombo was started with.
+ //
+ // We will use this to determine the shard structure when keyspaces
+ // get recreated.
+ originalTopology := (&tpb).CloneVT()
+
+ // default cell to "test" if unspecified
+ if len(tpb.Cells) == 0 {
+ tpb.Cells = append(tpb.Cells, "test")
+ }
+
+ cmd.Flags().Set("cells_to_watch", strings.Join(tpb.Cells, ","))
+
+ // vtctld UI requires the cell flag
+ cmd.Flags().Set("cell", tpb.Cells[0])
+ if cmd.Flags().Lookup("log_dir") == nil {
+ cmd.Flags().Set("log_dir", "$VTDATAROOT/tmp")
+ }
+
+ if externalTopoServer {
+ // Open topo server based on the command line flags defined at topo/server.go
+ // do not create cell info as it should be done by whoever sets up the external topo server
+ ts = topo.Open()
+ } else {
+ // Create topo server. We use a 'memorytopo' implementation.
+ ts = memorytopo.NewServer(context.Background(), tpb.Cells...)
+ }
+
+ // attempt to load any routing rules specified by tpb
+ if err := vtcombo.InitRoutingRules(context.Background(), ts, tpb.GetRoutingRules()); err != nil {
+ return fmt.Errorf("Failed to load routing rules: %w", err)
+ }
+
+ servenv.Init()
+ tabletenv.Init()
+
+ var (
+ mysqld = &vtcomboMysqld{}
+ cnf *mysqlctl.Mycnf
+ )
+
+ if startMysql {
+ mysqld.Mysqld, cnf, err = startMysqld(1)
+ if err != nil {
+ return err
+ }
+ servenv.OnClose(func() {
+ mysqld.Shutdown(context.TODO(), cnf, true)
+ })
+ // We want to ensure we can write to this database
+ mysqld.SetReadOnly(false)
+
+ } else {
+ dbconfigs.GlobalDBConfigs.InitWithSocket("")
+ mysqld.Mysqld = mysqlctl.NewMysqld(&dbconfigs.GlobalDBConfigs)
+ servenv.OnClose(mysqld.Close)
+ }
+
+ // Tablet configuration and init.
+ // Send mycnf as nil because vtcombo won't do backups and restores.
+ //
+ // Also force the `--tablet_manager_protocol` and `--tablet_protocol` flags
+ // to be the "internal" protocol that InitTabletMap registers.
+ cmd.Flags().Set("tablet_manager_protocol", "internal")
+ cmd.Flags().Set("tablet_protocol", "internal")
+ uid, err := vtcombo.InitTabletMap(ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, schemaDir, startMysql)
+ if err != nil {
+		// ensure we shut down mysql in the event we fail here
+ if startMysql {
+ mysqld.Shutdown(context.TODO(), cnf, true)
+ }
+
+ return fmt.Errorf("initTabletMapProto failed: %w", err)
+ }
+
+ globalCreateDb = func(ctx context.Context, ks *vttestpb.Keyspace) error {
+ // Check if we're recreating a keyspace that was previously deleted by looking
+ // at the original topology definition.
+ //
+ // If we find a matching keyspace, we create it with the same sharding
+ // configuration. This ensures that dropping and recreating a keyspace
+ // will end up with the same number of shards.
+ for _, originalKs := range originalTopology.Keyspaces {
+ if originalKs.Name == ks.Name {
+ ks = originalKs.CloneVT()
+ }
+ }
+
+ wr := wrangler.New(logutil.NewConsoleLogger(), ts, nil)
+ newUID, err := vtcombo.CreateKs(ctx, ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, schemaDir, ks, true, uid, wr)
+ if err != nil {
+ return err
+ }
+ uid = newUID
+ tpb.Keyspaces = append(tpb.Keyspaces, ks)
+ return nil
+ }
+
+ globalDropDb = func(ctx context.Context, ksName string) error {
+ if err := vtcombo.DeleteKs(ctx, ts, ksName, mysqld, &tpb); err != nil {
+ return err
+ }
+
+ // Rebuild the SrvVSchema object
+ if err := ts.RebuildSrvVSchema(ctx, tpb.Cells); err != nil {
+ return err
+ }
+
+ return nil
+ }
+
+ // Now that we have fully initialized the tablets, rebuild the keyspace graph.
+ for _, ks := range tpb.Keyspaces {
+ err := topotools.RebuildKeyspace(context.Background(), logutil.NewConsoleLogger(), ts, ks.GetName(), tpb.Cells, false)
+ if err != nil {
+ if startMysql {
+ mysqld.Shutdown(context.TODO(), cnf, true)
+ }
+
+ return fmt.Errorf("Couldn't build srv keyspace for (%v: %v). Got error: %w", ks, tpb.Cells, err)
+ }
+ }
+
+ // vtgate configuration and init
+ resilientServer = srvtopo.NewResilientServer(context.Background(), ts, "ResilientSrvTopoServer")
+ tabletTypesToWait := []topodatapb.TabletType{
+ topodatapb.TabletType_PRIMARY,
+ topodatapb.TabletType_REPLICA,
+ topodatapb.TabletType_RDONLY,
+ }
+ plannerVersion, _ := plancontext.PlannerNameToVersion(plannerName)
+
+ vtgate.QueryLogHandler = "/debug/vtgate/querylog"
+ vtgate.QueryLogzHandler = "/debug/vtgate/querylogz"
+ vtgate.QueryzHandler = "/debug/vtgate/queryz"
+ // pass nil for healthcheck, it will get created
+ vtg := vtgate.Init(context.Background(), nil, resilientServer, tpb.Cells[0], tabletTypesToWait, plannerVersion)
+
+ // vtctld configuration and init
+ err = vtctld.InitVtctld(ts)
+ if err != nil {
+ return err
+ }
+
+ if vschemaPersistenceDir != "" && !externalTopoServer {
+ startVschemaWatcher(vschemaPersistenceDir, tpb.Keyspaces, ts)
+ }
+
+ servenv.OnRun(func() {
+ addStatusParts(vtg)
+ })
+
+ servenv.OnTerm(func() {
+ log.Error("Terminating")
+ // FIXME(alainjobart): stop vtgate
+ })
+ servenv.OnClose(func() {
+ // We will still use the topo server during lameduck period
+ // to update our state, so closing it in OnClose()
+ ts.Close()
+ })
+ servenv.RunDefault()
+
+ return nil
+}
+
+// vtcomboMysqld is a wrapper on top of mysqlctl.Mysqld.
+// We need this wrapper because vtcombo runs with a single MySQL instance
+// which all the tablets connect to. (replica, primary, all). This means that we shouldn't
+// be trying to run any replication related commands on it, otherwise they fail.
+type vtcomboMysqld struct {
+ *mysqlctl.Mysqld
+}
+
+// SetReplicationSource implements the MysqlDaemon interface
+func (mysqld *vtcomboMysqld) SetReplicationSource(ctx context.Context, host string, port int32, stopReplicationBefore bool, startReplicationAfter bool) error {
+ return nil
+}
+
+// StartReplication implements the MysqlDaemon interface
+func (mysqld *vtcomboMysqld) StartReplication(hookExtraEnv map[string]string) error {
+ return nil
+}
+
+// RestartReplication implements the MysqlDaemon interface
+func (mysqld *vtcomboMysqld) RestartReplication(hookExtraEnv map[string]string) error {
+ return nil
+}
+
+// StartReplicationUntilAfter implements the MysqlDaemon interface
+func (mysqld *vtcomboMysqld) StartReplicationUntilAfter(ctx context.Context, pos replication.Position) error {
+ return nil
+}
+
+// StopReplication implements the MysqlDaemon interface
+func (mysqld *vtcomboMysqld) StopReplication(hookExtraEnv map[string]string) error {
+ return nil
+}
+
+// SetSemiSyncEnabled implements the MysqlDaemon interface
+func (mysqld *vtcomboMysqld) SetSemiSyncEnabled(source, replica bool) error {
+ return nil
+}
+
+// SemiSyncExtensionLoaded implements the MysqlDaemon interface
+func (mysqld *vtcomboMysqld) SemiSyncExtensionLoaded() (bool, error) {
+ return true, nil
+}
diff --git a/go/cmd/vtcombo/plugin_dbddl.go b/go/cmd/vtcombo/cli/plugin_dbddl.go
similarity index 93%
rename from go/cmd/vtcombo/plugin_dbddl.go
rename to go/cmd/vtcombo/cli/plugin_dbddl.go
index 49a7a601fb1..b04af91af5c 100644
--- a/go/cmd/vtcombo/plugin_dbddl.go
+++ b/go/cmd/vtcombo/cli/plugin_dbddl.go
@@ -14,10 +14,11 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
"context"
+ "sync"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/vtgate/engine"
@@ -29,7 +30,9 @@ var globalCreateDb func(ctx context.Context, ks *vttestpb.Keyspace) error
var globalDropDb func(ctx context.Context, ksName string) error
// DBDDL doesn't need to store any state - we use the global variables above instead
-type DBDDL struct{}
+type DBDDL struct {
+ mu sync.Mutex
+}
// CreateDatabase implements the engine.DBDDLPlugin interface
func (plugin *DBDDL) CreateDatabase(ctx context.Context, name string) error {
@@ -39,6 +42,8 @@ func (plugin *DBDDL) CreateDatabase(ctx context.Context, name string) error {
Name: "0",
}},
}
+ plugin.mu.Lock()
+ defer plugin.mu.Unlock()
return globalCreateDb(ctx, ks)
}
diff --git a/go/cmd/vtcombo/plugin_grpcvtctldserver.go b/go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go
similarity index 98%
rename from go/cmd/vtcombo/plugin_grpcvtctldserver.go
rename to go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go
index e5bba399072..2cf8eed8368 100644
--- a/go/cmd/vtcombo/plugin_grpcvtctldserver.go
+++ b/go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
"vitess.io/vitess/go/vt/servenv"
diff --git a/go/cmd/vtctld/plugin_grpcvtctlserver.go b/go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go
similarity index 98%
rename from go/cmd/vtctld/plugin_grpcvtctlserver.go
rename to go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go
index 4ec5323b075..8b7f918bc58 100644
--- a/go/cmd/vtctld/plugin_grpcvtctlserver.go
+++ b/go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
"vitess.io/vitess/go/vt/servenv"
diff --git a/go/cmd/vtcombo/plugin_grpcvtgateservice.go b/go/cmd/vtcombo/cli/plugin_grpcvtgateservice.go
similarity index 98%
rename from go/cmd/vtcombo/plugin_grpcvtgateservice.go
rename to go/cmd/vtcombo/cli/plugin_grpcvtgateservice.go
index ff58dff616a..a980f063577 100644
--- a/go/cmd/vtcombo/plugin_grpcvtgateservice.go
+++ b/go/cmd/vtcombo/cli/plugin_grpcvtgateservice.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the gRPC vtgateservice server
diff --git a/go/cmd/vtcombo/plugin_opentracing.go b/go/cmd/vtcombo/cli/plugin_opentracing.go
similarity index 98%
rename from go/cmd/vtcombo/plugin_opentracing.go
rename to go/cmd/vtcombo/cli/plugin_opentracing.go
index c2ea8325e6a..0b9274b498d 100644
--- a/go/cmd/vtcombo/plugin_opentracing.go
+++ b/go/cmd/vtcombo/cli/plugin_opentracing.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
"vitess.io/vitess/go/trace"
diff --git a/go/cmd/vtcombo/status.go b/go/cmd/vtcombo/cli/status.go
similarity index 96%
rename from go/cmd/vtcombo/status.go
rename to go/cmd/vtcombo/cli/status.go
index 2b5e2696391..8069fc72606 100644
--- a/go/cmd/vtcombo/status.go
+++ b/go/cmd/vtcombo/cli/status.go
@@ -14,15 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
"vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/srvtopo"
"vitess.io/vitess/go/vt/vtgate"
-
- _ "vitess.io/vitess/go/vt/status"
)
func addStatusParts(vtg *vtgate.VTGate) {
diff --git a/go/cmd/vtcombo/cli/vschema_watcher.go b/go/cmd/vtcombo/cli/vschema_watcher.go
new file mode 100644
index 00000000000..c1c9f120b96
--- /dev/null
+++ b/go/cmd/vtcombo/cli/vschema_watcher.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import (
+ "context"
+ "encoding/json"
+ "os"
+ "path"
+
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/vtgate/vindexes"
+
+ vschemapb "vitess.io/vitess/go/vt/proto/vschema"
+ vttestpb "vitess.io/vitess/go/vt/proto/vttest"
+)
+
+func startVschemaWatcher(vschemaPersistenceDir string, keyspaces []*vttestpb.Keyspace, ts *topo.Server) {
+ // Create the directory if it doesn't exist.
+ if err := createDirectoryIfNotExists(vschemaPersistenceDir); err != nil {
+ log.Fatalf("Unable to create vschema persistence directory %v: %v", vschemaPersistenceDir, err)
+ }
+
+ // If there are keyspace files, load them.
+ loadKeyspacesFromDir(vschemaPersistenceDir, keyspaces, ts)
+
+ // Rebuild the SrvVSchema object in case we loaded vschema from file
+ if err := ts.RebuildSrvVSchema(context.Background(), tpb.Cells); err != nil {
+ log.Fatalf("RebuildSrvVSchema failed: %v", err)
+ }
+
+ // Now watch for changes in the SrvVSchema object and persist them to disk.
+ go watchSrvVSchema(context.Background(), ts, tpb.Cells[0])
+}
+
+func loadKeyspacesFromDir(dir string, keyspaces []*vttestpb.Keyspace, ts *topo.Server) {
+ for _, ks := range tpb.Keyspaces {
+ ksFile := path.Join(dir, ks.Name+".json")
+ if _, err := os.Stat(ksFile); err == nil {
+ jsonData, err := os.ReadFile(ksFile)
+ if err != nil {
+ log.Fatalf("Unable to read keyspace file %v: %v", ksFile, err)
+ }
+
+ keyspace := &vschemapb.Keyspace{}
+ err = json.Unmarshal(jsonData, keyspace)
+ if err != nil {
+ log.Fatalf("Unable to parse keyspace file %v: %v", ksFile, err)
+ }
+
+ _, err = vindexes.BuildKeyspace(keyspace)
+ if err != nil {
+ log.Fatalf("Invalid keyspace definition: %v", err)
+ }
+ ts.SaveVSchema(context.Background(), ks.Name, keyspace)
+ log.Infof("Loaded keyspace %v from %v\n", ks.Name, ksFile)
+ }
+ }
+}
+
+func watchSrvVSchema(ctx context.Context, ts *topo.Server, cell string) {
+ data, ch, err := ts.WatchSrvVSchema(context.Background(), tpb.Cells[0])
+ if err != nil {
+ log.Fatalf("WatchSrvVSchema failed: %v", err)
+ }
+
+ if data.Err != nil {
+ log.Fatalf("WatchSrvVSchema could not retrieve initial vschema: %v", data.Err)
+ }
+ persistNewSrvVSchema(data.Value)
+
+ for update := range ch {
+ if update.Err != nil {
+ log.Errorf("WatchSrvVSchema returned an error: %v", update.Err)
+ } else {
+ persistNewSrvVSchema(update.Value)
+ }
+ }
+}
+
+func persistNewSrvVSchema(srvVSchema *vschemapb.SrvVSchema) {
+ for ksName, ks := range srvVSchema.Keyspaces {
+ jsonBytes, err := json.MarshalIndent(ks, "", " ")
+ if err != nil {
+ log.Errorf("Error marshaling keyspace: %v", err)
+ continue
+ }
+
+ err = os.WriteFile(path.Join(vschemaPersistenceDir, ksName+".json"), jsonBytes, 0644)
+ if err != nil {
+ log.Errorf("Error writing keyspace file: %v", err)
+ }
+ log.Infof("Persisted keyspace %v to %v", ksName, vschemaPersistenceDir)
+ }
+}
+
+func createDirectoryIfNotExists(dir string) error {
+ if _, err := os.Stat(dir); os.IsNotExist(err) {
+ return os.Mkdir(dir, 0755)
+ }
+ return nil
+}
diff --git a/go/cmd/vtcombo/docgen/main.go b/go/cmd/vtcombo/docgen/main.go
new file mode 100644
index 00000000000..31304296b3e
--- /dev/null
+++ b/go/cmd/vtcombo/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/vtcombo/cli"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+ Use: "docgen [-d <dir>]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(cli.Main, dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/vtcombo/main.go b/go/cmd/vtcombo/main.go
index affbf0520e7..f5de215b617 100644
--- a/go/cmd/vtcombo/main.go
+++ b/go/cmd/vtcombo/main.go
@@ -22,331 +22,16 @@ limitations under the License.
package main
import (
- "context"
- "os"
- "strings"
- "time"
-
- "github.com/spf13/pflag"
- "google.golang.org/protobuf/proto"
-
- "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/cmd/vtcombo/cli"
"vitess.io/vitess/go/exit"
- "vitess.io/vitess/go/mysql"
- "vitess.io/vitess/go/vt/dbconfigs"
"vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/logutil"
- "vitess.io/vitess/go/vt/mysqlctl"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/srvtopo"
- "vitess.io/vitess/go/vt/topo"
- "vitess.io/vitess/go/vt/topo/memorytopo"
- "vitess.io/vitess/go/vt/topotools"
- "vitess.io/vitess/go/vt/vtcombo"
- "vitess.io/vitess/go/vt/vtctld"
- "vitess.io/vitess/go/vt/vtgate"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
- "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
- "vitess.io/vitess/go/vt/vttest"
- "vitess.io/vitess/go/vt/wrangler"
-
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
- vttestpb "vitess.io/vitess/go/vt/proto/vttest"
)
-var (
- flags = pflag.NewFlagSet("vtcombo", pflag.ContinueOnError)
- schemaDir = flags.String("schema_dir", "", "Schema base directory. Should contain one directory per keyspace, with a vschema.json file if necessary.")
- startMysql = flags.Bool("start_mysql", false, "Should vtcombo also start mysql")
- mysqlPort = flags.Int("mysql_port", 3306, "mysql port")
- externalTopoServer = flags.Bool("external_topo_server", false, "Should vtcombo use an external topology server instead of starting its own in-memory topology server. "+
- "If true, vtcombo will use the flags defined in topo/server.go to open topo server")
- plannerName = flags.String("planner-version", "", "Sets the default planner to use when the session has not changed it. Valid values are: V3, V3Insert, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the gen4 planner and falls back to the V3 planner if the gen4 fails.")
-
- tpb vttestpb.VTTestTopology
- ts *topo.Server
- resilientServer *srvtopo.ResilientServer
-)
-
-func init() {
- flags.Var(vttest.TextTopoData(&tpb), "proto_topo", "vttest proto definition of the topology, encoded in compact text format. See vttest.proto for more information.")
- flags.Var(vttest.JSONTopoData(&tpb), "json_topo", "vttest proto definition of the topology, encoded in json format. See vttest.proto for more information.")
-
- servenv.RegisterDefaultFlags()
- servenv.RegisterFlags()
- servenv.RegisterGRPCServerFlags()
- servenv.RegisterGRPCServerAuthFlags()
- servenv.RegisterServiceMapFlag()
-}
-
-func startMysqld(uid uint32) (*mysqlctl.Mysqld, *mysqlctl.Mycnf) {
- ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
- mycnfFile := mysqlctl.MycnfFile(uid)
-
- var mysqld *mysqlctl.Mysqld
- var cnf *mysqlctl.Mycnf
- var err error
-
- if _, statErr := os.Stat(mycnfFile); os.IsNotExist(statErr) {
- mysqld, cnf, err = mysqlctl.CreateMysqldAndMycnf(uid, "", *mysqlPort)
- if err != nil {
- log.Errorf("failed to initialize mysql config :%v", err)
- exit.Return(1)
- }
- if err := mysqld.Init(ctx, cnf, ""); err != nil {
- log.Errorf("failed to initialize mysql :%v", err)
- exit.Return(1)
- }
- } else {
- mysqld, cnf, err = mysqlctl.OpenMysqldAndMycnf(uid)
- if err != nil {
- log.Errorf("failed to find mysql config: %v", err)
- exit.Return(1)
- }
- err = mysqld.RefreshConfig(ctx, cnf)
- if err != nil {
- log.Errorf("failed to refresh config: %v", err)
- exit.Return(1)
- }
- if err := mysqld.Start(ctx, cnf); err != nil {
- log.Errorf("Failed to start mysqld: %v", err)
- exit.Return(1)
- }
- }
- cancel()
- return mysqld, cnf
-}
-
func main() {
defer exit.Recover()
- // flag parsing
- var globalFlags *pflag.FlagSet
- dbconfigs.RegisterFlags(dbconfigs.All...)
- mysqlctl.RegisterFlags()
- servenv.OnParseFor("vtcombo", func(fs *pflag.FlagSet) {
- // We're going to force the value later, so don't even bother letting
- // the user know about this flag.
- fs.MarkHidden("tablet_protocol")
-
- // Add the vtcombo flags declared above in var/init sections to the
- // global flags.
- fs.AddFlagSet(flags)
- // Save for later -- see comment directly after ParseFlags for why.
- globalFlags = fs
-
- acl.RegisterFlags(fs)
- })
-
- servenv.ParseFlags("vtcombo")
-
- // At this point, servenv.ParseFlags has invoked _flag.Parse, which has
- // combined all the flags everywhere into the globalFlags variable we
- // stashed a reference to earlier in our OnParseFor callback function.
- //
- // We now take those flags and make them available to our `flags` instance,
- // which we call `Set` on various flags to force their values further down
- // in main().
- //
- // N.B.: we could just as easily call Set on globalFlags on everything
- // (including our local flags), but we need to save a reference either way,
- // and that in particular (globalFlags.Set on a local flag) feels more
- // potentially confusing than its inverse (flags.Set on a global flag), so
- // we go this way.
- flags.AddFlagSet(globalFlags)
-
- // Stash away a copy of the topology that vtcombo was started with.
- //
- // We will use this to determine the shard structure when keyspaces
- // get recreated.
- originalTopology := proto.Clone(&tpb).(*vttestpb.VTTestTopology)
-
- // default cell to "test" if unspecified
- if len(tpb.Cells) == 0 {
- tpb.Cells = append(tpb.Cells, "test")
- }
-
- flags.Set("cells_to_watch", strings.Join(tpb.Cells, ","))
-
- // vtctld UI requires the cell flag
- flags.Set("cell", tpb.Cells[0])
- if flags.Lookup("log_dir") == nil {
- flags.Set("log_dir", "$VTDATAROOT/tmp")
- }
-
- if *externalTopoServer {
- // Open topo server based on the command line flags defined at topo/server.go
- // do not create cell info as it should be done by whoever sets up the external topo server
- ts = topo.Open()
- } else {
- // Create topo server. We use a 'memorytopo' implementation.
- ts = memorytopo.NewServer(tpb.Cells...)
- }
-
- // attempt to load any routing rules specified by tpb
- if err := vtcombo.InitRoutingRules(context.Background(), ts, tpb.GetRoutingRules()); err != nil {
- log.Errorf("Failed to load routing rules: %v", err)
- exit.Return(1)
- }
-
- servenv.Init()
- tabletenv.Init()
-
- mysqld := &vtcomboMysqld{}
- var cnf *mysqlctl.Mycnf
- if *startMysql {
- mysqld.Mysqld, cnf = startMysqld(1)
- servenv.OnClose(func() {
- mysqld.Shutdown(context.TODO(), cnf, true)
- })
- // We want to ensure we can write to this database
- mysqld.SetReadOnly(false)
-
- } else {
- dbconfigs.GlobalDBConfigs.InitWithSocket("")
- mysqld.Mysqld = mysqlctl.NewMysqld(&dbconfigs.GlobalDBConfigs)
- servenv.OnClose(mysqld.Close)
- }
- // Tablet configuration and init.
- // Send mycnf as nil because vtcombo won't do backups and restores.
- //
- // Also force the `--tablet_manager_protocol` and `--tablet_protocol` flags
- // to be the "internal" protocol that InitTabletMap registers.
- flags.Set("tablet_manager_protocol", "internal")
- flags.Set("tablet_protocol", "internal")
- uid, err := vtcombo.InitTabletMap(ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, *schemaDir, *startMysql)
- if err != nil {
- log.Errorf("initTabletMapProto failed: %v", err)
- // ensure we start mysql in the event we fail here
- if *startMysql {
- mysqld.Shutdown(context.TODO(), cnf, true)
- }
+ if err := cli.Main.Execute(); err != nil {
+ log.Error(err)
exit.Return(1)
}
-
- globalCreateDb = func(ctx context.Context, ks *vttestpb.Keyspace) error {
- // Check if we're recreating a keyspace that was previously deleted by looking
- // at the original topology definition.
- //
- // If we find a matching keyspace, we create it with the same sharding
- // configuration. This ensures that dropping and recreating a keyspace
- // will end up with the same number of shards.
- for _, originalKs := range originalTopology.Keyspaces {
- if originalKs.Name == ks.Name {
- ks = proto.Clone(originalKs).(*vttestpb.Keyspace)
- }
- }
-
- wr := wrangler.New(logutil.NewConsoleLogger(), ts, nil)
- newUID, err := vtcombo.CreateKs(ctx, ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, *schemaDir, ks, true, uid, wr)
- if err != nil {
- return err
- }
- uid = newUID
- tpb.Keyspaces = append(tpb.Keyspaces, ks)
- return nil
- }
-
- globalDropDb = func(ctx context.Context, ksName string) error {
- if err := vtcombo.DeleteKs(ctx, ts, ksName, mysqld, &tpb); err != nil {
- return err
- }
-
- // Rebuild the SrvVSchema object
- if err := ts.RebuildSrvVSchema(ctx, tpb.Cells); err != nil {
- return err
- }
-
- return nil
- }
-
- // Now that we have fully initialized the tablets, rebuild the keyspace graph.
- for _, ks := range tpb.Keyspaces {
- err := topotools.RebuildKeyspace(context.Background(), logutil.NewConsoleLogger(), ts, ks.GetName(), tpb.Cells, false)
- if err != nil {
- if *startMysql {
- mysqld.Shutdown(context.TODO(), cnf, true)
- }
- log.Fatalf("Couldn't build srv keyspace for (%v: %v). Got error: %v", ks, tpb.Cells, err)
- }
- }
-
- // vtgate configuration and init
- resilientServer = srvtopo.NewResilientServer(ts, "ResilientSrvTopoServer")
- tabletTypesToWait := []topodatapb.TabletType{
- topodatapb.TabletType_PRIMARY,
- topodatapb.TabletType_REPLICA,
- topodatapb.TabletType_RDONLY,
- }
- plannerVersion, _ := plancontext.PlannerNameToVersion(*plannerName)
-
- vtgate.QueryLogHandler = "/debug/vtgate/querylog"
- vtgate.QueryLogzHandler = "/debug/vtgate/querylogz"
- vtgate.QueryzHandler = "/debug/vtgate/queryz"
- // pass nil for healthcheck, it will get created
- vtg := vtgate.Init(context.Background(), nil, resilientServer, tpb.Cells[0], tabletTypesToWait, plannerVersion)
-
- // vtctld configuration and init
- err = vtctld.InitVtctld(ts)
- if err != nil {
- exit.Return(1)
- }
-
- servenv.OnRun(func() {
- addStatusParts(vtg)
- })
-
- servenv.OnTerm(func() {
- log.Error("Terminating")
- // FIXME(alainjobart): stop vtgate
- })
- servenv.OnClose(func() {
- // We will still use the topo server during lameduck period
- // to update our state, so closing it in OnClose()
- ts.Close()
- })
- servenv.RunDefault()
-}
-
-// vtcomboMysqld is a wrapper on top of mysqlctl.Mysqld.
-// We need this wrapper because vtcombo runs with a single MySQL instance
-// which all the tablets connect to. (replica, primary, all). This means that we shouldn't
-// be trying to run any replication related commands on it, otherwise they fail.
-type vtcomboMysqld struct {
- *mysqlctl.Mysqld
-}
-
-// SetReplicationSource implements the MysqlDaemon interface
-func (mysqld *vtcomboMysqld) SetReplicationSource(ctx context.Context, host string, port int32, stopReplicationBefore bool, startReplicationAfter bool) error {
- return nil
-}
-
-// StartReplication implements the MysqlDaemon interface
-func (mysqld *vtcomboMysqld) StartReplication(hookExtraEnv map[string]string) error {
- return nil
-}
-
-// RestartReplication implements the MysqlDaemon interface
-func (mysqld *vtcomboMysqld) RestartReplication(hookExtraEnv map[string]string) error {
- return nil
-}
-
-// StartReplicationUntilAfter implements the MysqlDaemon interface
-func (mysqld *vtcomboMysqld) StartReplicationUntilAfter(ctx context.Context, pos mysql.Position) error {
- return nil
-}
-
-// StopReplication implements the MysqlDaemon interface
-func (mysqld *vtcomboMysqld) StopReplication(hookExtraEnv map[string]string) error {
- return nil
-}
-
-// SetSemiSyncEnabled implements the MysqlDaemon interface
-func (mysqld *vtcomboMysqld) SetSemiSyncEnabled(source, replica bool) error {
- return nil
-}
-
-// SemiSyncExtensionLoaded implements the MysqlDaemon interface
-func (mysqld *vtcomboMysqld) SemiSyncExtensionLoaded() (bool, error) {
- return true, nil
}
diff --git a/go/cmd/vtctld/cli/cli.go b/go/cmd/vtctld/cli/cli.go
new file mode 100644
index 00000000000..e5124133adb
--- /dev/null
+++ b/go/cmd/vtctld/cli/cli.go
@@ -0,0 +1,89 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/vtctld"
+)
+
+var (
+ ts *topo.Server
+ Main = &cobra.Command{
+ Use: "vtctld",
+ Short: "The Vitess cluster management daemon.",
+ Long: `vtctld provides web and gRPC interfaces to manage a single Vitess cluster.
+It is usually the first Vitess component to be started after a valid global topology service has been created.
+
+For the last several releases, vtctld has been transitioning to a newer gRPC service for well-typed cluster management requests.
+This is **required** to use programs such as vtadmin and vtctldclient, and the old API and service are deprecated and will be removed in a future release.
+To enable this newer service, include "grpc-vtctld" in the --service_map argument.
+This is demonstrated in the example usage below.`,
+ Example: `vtctld \
+ --topo_implementation etcd2 \
+ --topo_global_server_address localhost:2379 \
+ --topo_global_root /vitess/ \
+ --service_map 'grpc-vtctl,grpc-vtctld' \
+ --backup_storage_implementation file \
+ --file_backup_storage_root $VTDATAROOT/backups \
+ --port 15000 \
+ --grpc_port 15999`,
+ Args: cobra.NoArgs,
+ Version: servenv.AppVersion.String(),
+ PreRunE: servenv.CobraPreRunE,
+ RunE: run,
+ }
+)
+
+func run(cmd *cobra.Command, args []string) error {
+ servenv.Init()
+
+ ts = topo.Open()
+ defer ts.Close()
+
+ // Init the vtctld core
+ if err := vtctld.InitVtctld(ts); err != nil {
+ return err
+ }
+
+ // Register http debug/health
+ vtctld.RegisterDebugHealthHandler(ts)
+
+ // Start schema manager service.
+ initSchema()
+
+ // And run the server.
+ servenv.RunDefault()
+
+ return nil
+}
+
+func init() {
+ servenv.RegisterDefaultFlags()
+ servenv.RegisterFlags()
+ servenv.RegisterGRPCServerFlags()
+ servenv.RegisterGRPCServerAuthFlags()
+ servenv.RegisterServiceMapFlag()
+
+ servenv.MoveFlagsToCobraCommand(Main)
+
+ acl.RegisterFlags(Main.Flags())
+}
diff --git a/go/cmd/vtbackup/plugin_azblobbackupstorage.go b/go/cmd/vtctld/cli/plugin_azblobbackupstorage.go
similarity index 97%
rename from go/cmd/vtbackup/plugin_azblobbackupstorage.go
rename to go/cmd/vtctld/cli/plugin_azblobbackupstorage.go
index a4ca64096a9..bdadc894aae 100644
--- a/go/cmd/vtbackup/plugin_azblobbackupstorage.go
+++ b/go/cmd/vtctld/cli/plugin_azblobbackupstorage.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/mysqlctl/azblobbackupstorage"
diff --git a/go/cmd/vtctld/plugin_cephbackupstorage.go b/go/cmd/vtctld/cli/plugin_cephbackupstorage.go
similarity index 97%
rename from go/cmd/vtctld/plugin_cephbackupstorage.go
rename to go/cmd/vtctld/cli/plugin_cephbackupstorage.go
index 6cd2d5619d0..171198f5e29 100644
--- a/go/cmd/vtctld/plugin_cephbackupstorage.go
+++ b/go/cmd/vtctld/cli/plugin_cephbackupstorage.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/mysqlctl/cephbackupstorage"
diff --git a/go/cmd/vtctld/plugin_consultopo.go b/go/cmd/vtctld/cli/plugin_consultopo.go
similarity index 98%
rename from go/cmd/vtctld/plugin_consultopo.go
rename to go/cmd/vtctld/cli/plugin_consultopo.go
index a0c53abe5ea..4617d753953 100644
--- a/go/cmd/vtctld/plugin_consultopo.go
+++ b/go/cmd/vtctld/cli/plugin_consultopo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the 'consul' topo.Server.
diff --git a/go/cmd/vtctld/plugin_etcd2topo.go b/go/cmd/vtctld/cli/plugin_etcd2topo.go
similarity index 98%
rename from go/cmd/vtctld/plugin_etcd2topo.go
rename to go/cmd/vtctld/cli/plugin_etcd2topo.go
index 6ec507f910d..06e014fc19f 100644
--- a/go/cmd/vtctld/plugin_etcd2topo.go
+++ b/go/cmd/vtctld/cli/plugin_etcd2topo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the 'etcd2' topo.Server.
diff --git a/go/cmd/vttablet/plugin_filebackupstorage.go b/go/cmd/vtctld/cli/plugin_filebackupstorage.go
similarity index 97%
rename from go/cmd/vttablet/plugin_filebackupstorage.go
rename to go/cmd/vtctld/cli/plugin_filebackupstorage.go
index cf2ceb5150f..9edc82d6a1b 100644
--- a/go/cmd/vttablet/plugin_filebackupstorage.go
+++ b/go/cmd/vtctld/cli/plugin_filebackupstorage.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage"
diff --git a/go/cmd/vtctld/plugin_gcsbackupstorage.go b/go/cmd/vtctld/cli/plugin_gcsbackupstorage.go
similarity index 97%
rename from go/cmd/vtctld/plugin_gcsbackupstorage.go
rename to go/cmd/vtctld/cli/plugin_gcsbackupstorage.go
index 82a22cef1da..655583c8ca2 100644
--- a/go/cmd/vtctld/plugin_gcsbackupstorage.go
+++ b/go/cmd/vtctld/cli/plugin_gcsbackupstorage.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/mysqlctl/gcsbackupstorage"
diff --git a/go/cmd/vtctld/plugin_grpctabletconn.go b/go/cmd/vtctld/cli/plugin_grpctabletconn.go
similarity index 98%
rename from go/cmd/vtctld/plugin_grpctabletconn.go
rename to go/cmd/vtctld/cli/plugin_grpctabletconn.go
index 08291a7c916..4a97e36eec4 100644
--- a/go/cmd/vtctld/plugin_grpctabletconn.go
+++ b/go/cmd/vtctld/cli/plugin_grpctabletconn.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the gRPC tabletconn client
diff --git a/go/cmd/vtorc/plugin_grpctmclient.go b/go/cmd/vtctld/cli/plugin_grpctmclient.go
similarity index 98%
rename from go/cmd/vtorc/plugin_grpctmclient.go
rename to go/cmd/vtctld/cli/plugin_grpctmclient.go
index ce554da96df..8cd349c7f87 100644
--- a/go/cmd/vtorc/plugin_grpctmclient.go
+++ b/go/cmd/vtctld/cli/plugin_grpctmclient.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the gRPC tabletmanager client
diff --git a/go/cmd/vtctld/plugin_grpcvtctldserver.go b/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go
similarity index 98%
rename from go/cmd/vtctld/plugin_grpcvtctldserver.go
rename to go/cmd/vtctld/cli/plugin_grpcvtctldserver.go
index ee5d0aba22a..ff283d91336 100644
--- a/go/cmd/vtctld/plugin_grpcvtctldserver.go
+++ b/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
"vitess.io/vitess/go/vt/servenv"
diff --git a/go/cmd/vtcombo/plugin_grpcvtctlserver.go b/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go
similarity index 98%
rename from go/cmd/vtcombo/plugin_grpcvtctlserver.go
rename to go/cmd/vtctld/cli/plugin_grpcvtctlserver.go
index 4ec5323b075..8b7f918bc58 100644
--- a/go/cmd/vtcombo/plugin_grpcvtctlserver.go
+++ b/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
"vitess.io/vitess/go/vt/servenv"
diff --git a/go/cmd/vtctld/plugin_grpcvtgateconn.go b/go/cmd/vtctld/cli/plugin_grpcvtgateconn.go
similarity index 98%
rename from go/cmd/vtctld/plugin_grpcvtgateconn.go
rename to go/cmd/vtctld/cli/plugin_grpcvtgateconn.go
index 87019ea4260..2f05e6d9a4e 100644
--- a/go/cmd/vtctld/plugin_grpcvtgateconn.go
+++ b/go/cmd/vtctld/cli/plugin_grpcvtgateconn.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the gRPC vtgateconn client
diff --git a/go/cmd/vtctld/plugin_opentracing.go b/go/cmd/vtctld/cli/plugin_opentracing.go
similarity index 98%
rename from go/cmd/vtctld/plugin_opentracing.go
rename to go/cmd/vtctld/cli/plugin_opentracing.go
index c35034d42a2..76423623493 100644
--- a/go/cmd/vtctld/plugin_opentracing.go
+++ b/go/cmd/vtctld/cli/plugin_opentracing.go
@@ -14,11 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
"vitess.io/vitess/go/trace"
-
"vitess.io/vitess/go/vt/servenv"
)
diff --git a/go/cmd/vtctld/plugin_opentsdb.go b/go/cmd/vtctld/cli/plugin_opentsdb.go
similarity index 98%
rename from go/cmd/vtctld/plugin_opentsdb.go
rename to go/cmd/vtctld/cli/plugin_opentsdb.go
index 38f464dd887..e4f76d29009 100644
--- a/go/cmd/vtctld/plugin_opentsdb.go
+++ b/go/cmd/vtctld/cli/plugin_opentsdb.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports opentsdb to register the opentsdb stats backend.
diff --git a/go/cmd/vtctld/plugin_prometheusbackend.go b/go/cmd/vtctld/cli/plugin_prometheusbackend.go
similarity index 98%
rename from go/cmd/vtctld/plugin_prometheusbackend.go
rename to go/cmd/vtctld/cli/plugin_prometheusbackend.go
index f3c33e5637b..3c66018fe75 100644
--- a/go/cmd/vtctld/plugin_prometheusbackend.go
+++ b/go/cmd/vtctld/cli/plugin_prometheusbackend.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports Prometheus to allow for instrumentation
// with the Prometheus client library
diff --git a/go/cmd/vtctld/plugin_s3backupstorage.go b/go/cmd/vtctld/cli/plugin_s3backupstorage.go
similarity index 97%
rename from go/cmd/vtctld/plugin_s3backupstorage.go
rename to go/cmd/vtctld/cli/plugin_s3backupstorage.go
index a5b5c671ebb..4b3ecb33edb 100644
--- a/go/cmd/vtctld/plugin_s3backupstorage.go
+++ b/go/cmd/vtctld/cli/plugin_s3backupstorage.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/mysqlctl/s3backupstorage"
diff --git a/go/cmd/vtctld/plugin_zk2topo.go b/go/cmd/vtctld/cli/plugin_zk2topo.go
similarity index 98%
rename from go/cmd/vtctld/plugin_zk2topo.go
rename to go/cmd/vtctld/cli/plugin_zk2topo.go
index 531d92c4cdd..77f86d98d52 100644
--- a/go/cmd/vtctld/plugin_zk2topo.go
+++ b/go/cmd/vtctld/cli/plugin_zk2topo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the 'zk2' topo.Server.
diff --git a/go/cmd/vtctld/schema.go b/go/cmd/vtctld/cli/schema.go
similarity index 63%
rename from go/cmd/vtctld/schema.go
rename to go/cmd/vtctld/cli/schema.go
index 3bd7ae091c2..480679a09e6 100644
--- a/go/cmd/vtctld/schema.go
+++ b/go/cmd/vtctld/cli/schema.go
@@ -14,19 +14,18 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
"context"
"time"
- "github.com/spf13/pflag"
-
"vitess.io/vitess/go/timer"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/schemamanager"
"vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver"
"vitess.io/vitess/go/vt/vttablet/tmclient"
"vitess.io/vitess/go/vt/wrangler"
)
@@ -36,18 +35,16 @@ var (
schemaChangeController string
schemaChangeUser string
schemaChangeCheckInterval = time.Minute
- schemaChangeReplicasTimeout = wrangler.DefaultWaitReplicasTimeout
+ schemaChangeReplicasTimeout = grpcvtctldserver.DefaultWaitReplicasTimeout
)
func init() {
- servenv.OnParse(func(fs *pflag.FlagSet) {
- fs.StringVar(&schemaChangeDir, "schema_change_dir", schemaChangeDir, "Directory containing schema changes for all keyspaces. Each keyspace has its own directory, and schema changes are expected to live in '$KEYSPACE/input' dir. (e.g. 'test_keyspace/input/*sql'). Each sql file represents a schema change.")
- fs.StringVar(&schemaChangeController, "schema_change_controller", schemaChangeController, "Schema change controller is responsible for finding schema changes and responding to schema change events.")
- fs.StringVar(&schemaChangeUser, "schema_change_user", schemaChangeUser, "The user who schema changes are submitted on behalf of.")
+ Main.Flags().StringVar(&schemaChangeDir, "schema_change_dir", schemaChangeDir, "Directory containing schema changes for all keyspaces. Each keyspace has its own directory, and schema changes are expected to live in '$KEYSPACE/input' dir. (e.g. 'test_keyspace/input/*sql'). Each sql file represents a schema change.")
+ Main.Flags().StringVar(&schemaChangeController, "schema_change_controller", schemaChangeController, "Schema change controller is responsible for finding schema changes and responding to schema change events.")
+ Main.Flags().StringVar(&schemaChangeUser, "schema_change_user", schemaChangeUser, "The user who schema changes are submitted on behalf of.")
- fs.DurationVar(&schemaChangeCheckInterval, "schema_change_check_interval", schemaChangeCheckInterval, "How often the schema change dir is checked for schema changes. This value must be positive; if zero or lower, the default of 1m is used.")
- fs.DurationVar(&schemaChangeReplicasTimeout, "schema_change_replicas_timeout", schemaChangeReplicasTimeout, "How long to wait for replicas to receive a schema change.")
- })
+ Main.Flags().DurationVar(&schemaChangeCheckInterval, "schema_change_check_interval", schemaChangeCheckInterval, "How often the schema change dir is checked for schema changes. This value must be positive; if zero or lower, the default of 1m is used.")
+ Main.Flags().DurationVar(&schemaChangeReplicasTimeout, "schema_change_replicas_timeout", schemaChangeReplicasTimeout, "How long to wait for replicas to receive a schema change.")
}
func initSchema() {
@@ -78,7 +75,7 @@ func initSchema() {
_, err = schemamanager.Run(
ctx,
controller,
- schemamanager.NewTabletExecutor("vtctld/schema", wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), schemaChangeReplicasTimeout),
+ schemamanager.NewTabletExecutor("vtctld/schema", wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), schemaChangeReplicasTimeout, 0),
)
if err != nil {
log.Errorf("Schema change failed, error: %v", err)
diff --git a/go/cmd/vtctld/docgen/main.go b/go/cmd/vtctld/docgen/main.go
new file mode 100644
index 00000000000..4243153859e
--- /dev/null
+++ b/go/cmd/vtctld/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/vtctld/cli"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+ Use: "docgen [-d ]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(cli.Main, dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/vtctld/main.go b/go/cmd/vtctld/main.go
index 26f9e100c19..6f9ab7384fc 100644
--- a/go/cmd/vtctld/main.go
+++ b/go/cmd/vtctld/main.go
@@ -17,52 +17,12 @@ limitations under the License.
package main
import (
- "github.com/spf13/pflag"
-
- "vitess.io/vitess/go/acl"
- "vitess.io/vitess/go/exit"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/topo"
- "vitess.io/vitess/go/vt/vtctld"
-)
-
-func init() {
- servenv.RegisterDefaultFlags()
- servenv.RegisterFlags()
- servenv.RegisterGRPCServerFlags()
- servenv.RegisterGRPCServerAuthFlags()
- servenv.RegisterServiceMapFlag()
-
- servenv.OnParse(func(fs *pflag.FlagSet) {
- acl.RegisterFlags(fs)
- })
-}
-
-// used at runtime by plug-ins
-var (
- ts *topo.Server
+ "vitess.io/vitess/go/cmd/vtctld/cli"
+ "vitess.io/vitess/go/vt/log"
)
func main() {
- servenv.ParseFlags("vtctld")
- servenv.Init()
- defer servenv.Close()
-
- ts = topo.Open()
- defer ts.Close()
-
- // Init the vtctld core
- err := vtctld.InitVtctld(ts)
- if err != nil {
- exit.Return(1)
+ if err := cli.Main.Execute(); err != nil {
+ log.Fatal(err)
}
-
- // Register http debug/health
- vtctld.RegisterDebugHealthHandler(ts)
-
- // Start schema manager service.
- initSchema()
-
- // And run the server.
- servenv.RunDefault()
}
diff --git a/go/cmd/vtctldclient/cli/awk.go b/go/cmd/vtctldclient/cli/awk.go
index c68b0fc0627..2916034a3ca 100644
--- a/go/cmd/vtctldclient/cli/awk.go
+++ b/go/cmd/vtctldclient/cli/awk.go
@@ -22,7 +22,7 @@ import (
"strings"
"time"
- "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/protoutil"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/topo/topoproto"
@@ -66,7 +66,7 @@ func MarshalTabletAWK(t *topodatapb.Tablet) string {
// special case for old primary that hasn't been updated in the topo
// yet.
if t.PrimaryTermStartTime != nil && t.PrimaryTermStartTime.Seconds > 0 {
- mtst = logutil.ProtoToTime(t.PrimaryTermStartTime).Format(time.RFC3339)
+ mtst = protoutil.TimeFromProto(t.PrimaryTermStartTime).UTC().Format(time.RFC3339)
}
return fmt.Sprintf("%v %v %v %v %v %v %v %v", topoproto.TabletAliasString(t.Alias), keyspace, shard, topoproto.TabletTypeLString(t.Type), ti.Addr(), ti.MysqlAddr(), MarshalMapAWK(t.Tags), mtst)
diff --git a/go/cmd/vtctldclient/cli/json.go b/go/cmd/vtctldclient/cli/json.go
index 80af6d80d72..fb9ed2c35ac 100644
--- a/go/cmd/vtctldclient/cli/json.go
+++ b/go/cmd/vtctldclient/cli/json.go
@@ -25,6 +25,19 @@ import (
"google.golang.org/protobuf/proto"
)
+const (
+ jsonIndent = " "
+ jsonPrefix = ""
+)
+
+var DefaultMarshalOptions = protojson.MarshalOptions{
+ Multiline: true,
+ Indent: jsonIndent,
+ UseEnumNumbers: false,
+ UseProtoNames: true,
+ EmitUnpopulated: true, // Can be set to false via the --compact flag
+}
+
// MarshalJSON marshals obj to a JSON string. It uses the jsonpb marshaler for
// proto.Message types, with some sensible defaults, and falls back to the
// standard Go marshaler otherwise. In both cases, the marshaled JSON is
@@ -34,19 +47,22 @@ import (
// either by being a proto message type or by anonymously embedding one, so for
// other types that may have nested struct fields, we still use the standard Go
// marshaler, which will result in different formattings.
-func MarshalJSON(obj any) ([]byte, error) {
+func MarshalJSON(obj any, marshalOptions ...protojson.MarshalOptions) ([]byte, error) {
switch obj := obj.(type) {
case proto.Message:
- m := protojson.MarshalOptions{
- Multiline: true,
- Indent: " ",
- UseEnumNumbers: true,
- UseProtoNames: true,
- EmitUnpopulated: true,
+ m := DefaultMarshalOptions
+ switch len(marshalOptions) {
+ case 0: // Use default
+ case 1: // Use provided one
+ m = marshalOptions[0]
+ default:
+ return nil, fmt.Errorf("there should only be one optional MarshalOptions value but we had %d",
+ len(marshalOptions))
}
+
return m.Marshal(obj)
default:
- data, err := json.MarshalIndent(obj, "", " ")
+ data, err := json.MarshalIndent(obj, jsonPrefix, jsonIndent)
if err != nil {
return nil, fmt.Errorf("json.Marshal = %v", err)
}
@@ -54,3 +70,11 @@ func MarshalJSON(obj any) ([]byte, error) {
return data, nil
}
}
+
+// MarshalJSONPretty works the same as MarshalJSON but uses ENUM names
+// instead of numbers.
+func MarshalJSONPretty(obj any) ([]byte, error) {
+ marshalOptions := DefaultMarshalOptions
+ marshalOptions.UseEnumNumbers = false
+ return MarshalJSON(obj, marshalOptions)
+}
diff --git a/go/cmd/vtctldclient/cli/shards.go b/go/cmd/vtctldclient/cli/shards.go
index 93d7529d9a8..8ee38eff0d4 100644
--- a/go/cmd/vtctldclient/cli/shards.go
+++ b/go/cmd/vtctldclient/cli/shards.go
@@ -19,7 +19,7 @@ package cli
import (
"sort"
- "vitess.io/vitess/go/mysql"
+ "vitess.io/vitess/go/mysql/replication"
"vitess.io/vitess/go/vt/topo/topoproto"
replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata"
@@ -83,12 +83,12 @@ func (rts rTablets) Less(i, j int) bool {
}
// then compare replication positions
- lpos, err := mysql.DecodePosition(l.Status.Position)
+ lpos, err := replication.DecodePosition(l.Status.Position)
if err != nil {
return true
}
- rpos, err := mysql.DecodePosition(r.Status.Position)
+ rpos, err := replication.DecodePosition(r.Status.Position)
if err != nil {
return false
}
diff --git a/go/cmd/vtctldclient/command/backups.go b/go/cmd/vtctldclient/command/backups.go
index c427a88f1df..e6314ed7d6e 100644
--- a/go/cmd/vtctldclient/command/backups.go
+++ b/go/cmd/vtctldclient/command/backups.go
@@ -35,7 +35,7 @@ import (
var (
// Backup makes a Backup gRPC call to a vtctld.
Backup = &cobra.Command{
- Use: "Backup [--concurrency ] [--allow-primary] ",
+ Use: "Backup [--concurrency ] [--allow-primary] [--incremental-from-pos=|auto] [--upgrade-safe] ",
Short: "Uses the BackupStorage service on the given tablet to create and store a new backup.",
DisableFlagsInUseLine: true,
Args: cobra.ExactArgs(1),
@@ -43,7 +43,7 @@ var (
}
// BackupShard makes a BackupShard gRPC call to a vtctld.
BackupShard = &cobra.Command{
- Use: "BackupShard [--concurrency ] [--allow-primary] ",
+ Use: "BackupShard [--concurrency ] [--allow-primary] [--incremental-from-pos=|auto] [--upgrade-safe] ",
Short: "Finds the most up-to-date REPLICA, RDONLY, or SPARE tablet in the given shard and uses the BackupStorage service on that tablet to create and store a new backup.",
Long: `Finds the most up-to-date REPLICA, RDONLY, or SPARE tablet in the given shard and uses the BackupStorage service on that tablet to create and store a new backup.
@@ -70,7 +70,7 @@ If no replica-type tablet can be found, the backup can be taken on the primary i
}
// RestoreFromBackup makes a RestoreFromBackup gRPC call to a vtctld.
RestoreFromBackup = &cobra.Command{
- Use: "RestoreFromBackup [--backup-timestamp|-t ] ",
+ Use: "RestoreFromBackup [--backup-timestamp|-t ] [--restore-to-pos ] [--dry-run] ",
Short: "Stops mysqld on the specified tablet and restores the data from either the latest backup or closest before `backup-timestamp`.",
DisableFlagsInUseLine: true,
Args: cobra.ExactArgs(1),
@@ -79,8 +79,10 @@ If no replica-type tablet can be found, the backup can be taken on the primary i
)
var backupOptions = struct {
- AllowPrimary bool
- Concurrency uint64
+ AllowPrimary bool
+ Concurrency uint64
+ IncrementalFromPos string
+ UpgradeSafe bool
}{}
func commandBackup(cmd *cobra.Command, args []string) error {
@@ -92,9 +94,11 @@ func commandBackup(cmd *cobra.Command, args []string) error {
cli.FinishedParsing(cmd)
stream, err := client.Backup(commandCtx, &vtctldatapb.BackupRequest{
- TabletAlias: tabletAlias,
- AllowPrimary: backupOptions.AllowPrimary,
- Concurrency: backupOptions.Concurrency,
+ TabletAlias: tabletAlias,
+ AllowPrimary: backupOptions.AllowPrimary,
+ Concurrency: backupOptions.Concurrency,
+ IncrementalFromPos: backupOptions.IncrementalFromPos,
+ UpgradeSafe: backupOptions.UpgradeSafe,
})
if err != nil {
return err
@@ -114,8 +118,10 @@ func commandBackup(cmd *cobra.Command, args []string) error {
}
var backupShardOptions = struct {
- AllowPrimary bool
- Concurrency uint64
+ AllowPrimary bool
+ Concurrency uint64
+ IncrementalFromPos string
+ UpgradeSafe bool
}{}
func commandBackupShard(cmd *cobra.Command, args []string) error {
@@ -127,10 +133,12 @@ func commandBackupShard(cmd *cobra.Command, args []string) error {
cli.FinishedParsing(cmd)
stream, err := client.BackupShard(commandCtx, &vtctldatapb.BackupShardRequest{
- Keyspace: keyspace,
- Shard: shard,
- AllowPrimary: backupShardOptions.AllowPrimary,
- Concurrency: backupShardOptions.Concurrency,
+ Keyspace: keyspace,
+ Shard: shard,
+ AllowPrimary: backupShardOptions.AllowPrimary,
+ Concurrency: backupShardOptions.Concurrency,
+ IncrementalFromPos: backupShardOptions.IncrementalFromPos,
+ UpgradeSafe: backupShardOptions.UpgradeSafe,
})
if err != nil {
return err
@@ -210,7 +218,10 @@ func commandRemoveBackup(cmd *cobra.Command, args []string) error {
}
var restoreFromBackupOptions = struct {
- BackupTimestamp string
+ BackupTimestamp string
+ RestoreToPos string
+ RestoreToTimestamp string
+ DryRun bool
}{}
func commandRestoreFromBackup(cmd *cobra.Command, args []string) error {
@@ -219,8 +230,23 @@ func commandRestoreFromBackup(cmd *cobra.Command, args []string) error {
return err
}
+ if restoreFromBackupOptions.RestoreToPos != "" && restoreFromBackupOptions.RestoreToTimestamp != "" {
+ return fmt.Errorf("--restore-to-pos and --restore-to-timestamp are mutually exclusive")
+ }
+
+ var restoreToTimestamp time.Time
+ if restoreFromBackupOptions.RestoreToTimestamp != "" {
+ restoreToTimestamp, err = mysqlctl.ParseRFC3339(restoreFromBackupOptions.RestoreToTimestamp)
+ if err != nil {
+ return err
+ }
+ }
+
req := &vtctldatapb.RestoreFromBackupRequest{
- TabletAlias: alias,
+ TabletAlias: alias,
+ RestoreToPos: restoreFromBackupOptions.RestoreToPos,
+ RestoreToTimestamp: protoutil.TimeToProto(restoreToTimestamp),
+ DryRun: restoreFromBackupOptions.DryRun,
}
if restoreFromBackupOptions.BackupTimestamp != "" {
@@ -255,10 +281,15 @@ func commandRestoreFromBackup(cmd *cobra.Command, args []string) error {
func init() {
Backup.Flags().BoolVar(&backupOptions.AllowPrimary, "allow-primary", false, "Allow the primary of a shard to be used for the backup. WARNING: If using the builtin backup engine, this will shutdown mysqld on the primary and stop writes for the duration of the backup.")
Backup.Flags().Uint64Var(&backupOptions.Concurrency, "concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously.")
+ Backup.Flags().StringVar(&backupOptions.IncrementalFromPos, "incremental-from-pos", "", "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position")
+
+ Backup.Flags().BoolVar(&backupOptions.UpgradeSafe, "upgrade-safe", false, "Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.")
Root.AddCommand(Backup)
BackupShard.Flags().BoolVar(&backupShardOptions.AllowPrimary, "allow-primary", false, "Allow the primary of a shard to be used for the backup. WARNING: If using the builtin backup engine, this will shutdown mysqld on the primary and stop writes for the duration of the backup.")
BackupShard.Flags().Uint64Var(&backupShardOptions.Concurrency, "concurrency", 4, "Specifies the number of compression/checksum jobs to run simultaneously.")
+ BackupShard.Flags().StringVar(&backupShardOptions.IncrementalFromPos, "incremental-from-pos", "", "Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position")
+ BackupShard.Flags().BoolVar(&backupOptions.UpgradeSafe, "upgrade-safe", false, "Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.")
Root.AddCommand(BackupShard)
GetBackups.Flags().Uint32VarP(&getBackupsOptions.Limit, "limit", "l", 0, "Retrieve only the most recent N backups.")
@@ -268,5 +299,8 @@ func init() {
Root.AddCommand(RemoveBackup)
RestoreFromBackup.Flags().StringVarP(&restoreFromBackupOptions.BackupTimestamp, "backup-timestamp", "t", "", "Use the backup taken at, or closest before, this timestamp. Omit to use the latest backup. Timestamp format is \"YYYY-mm-DD.HHMMSS\".")
+ RestoreFromBackup.Flags().StringVar(&restoreFromBackupOptions.RestoreToPos, "restore-to-pos", "", "Run a point in time recovery that ends with the given position. This will attempt to use one full backup followed by zero or more incremental backups")
+ RestoreFromBackup.Flags().StringVar(&restoreFromBackupOptions.RestoreToTimestamp, "restore-to-timestamp", "", "Run a point in time recovery that restores up to, and excluding, given timestamp in RFC3339 format (`2006-01-02T15:04:05Z07:00`). This will attempt to use one full backup followed by zero or more incremental backups")
+ RestoreFromBackup.Flags().BoolVar(&restoreFromBackupOptions.DryRun, "dry-run", false, "Only validate restore steps, do not actually restore data")
Root.AddCommand(RestoreFromBackup)
}
diff --git a/go/cmd/vtctldclient/command/keyspaces.go b/go/cmd/vtctldclient/command/keyspaces.go
index 393b4795d8f..420c274ddd5 100644
--- a/go/cmd/vtctldclient/command/keyspaces.go
+++ b/go/cmd/vtctldclient/command/keyspaces.go
@@ -24,10 +24,12 @@ import (
"github.com/spf13/cobra"
+ "vitess.io/vitess/go/mysql/sqlerror"
+ "vitess.io/vitess/go/protoutil"
+
"vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/constants/sidecar"
"vitess.io/vitess/go/mysql"
- "vitess.io/vitess/go/vt/logutil"
- "vitess.io/vitess/go/vt/sidecardb"
"vitess.io/vitess/go/vt/topo"
topodatapb "vitess.io/vitess/go/vt/proto/topodata"
@@ -176,7 +178,7 @@ func commandCreateKeyspace(cmd *cobra.Command, args []string) error {
return fmt.Errorf("--snapshot-time cannot be in the future; snapshot = %v, now = %v", t, now)
}
- snapshotTime = logutil.TimeToProto(t)
+ snapshotTime = protoutil.TimeToProto(t)
}
createKeyspaceOptions.SidecarDBName = strings.TrimSpace(createKeyspaceOptions.SidecarDBName)
@@ -184,7 +186,7 @@ func commandCreateKeyspace(cmd *cobra.Command, args []string) error {
return errors.New("--sidecar-db-name cannot be empty when creating a keyspace")
}
if len(createKeyspaceOptions.SidecarDBName) > mysql.MaxIdentifierLength {
- return mysql.NewSQLError(mysql.ERTooLongIdent, mysql.SSDataTooLong, "--sidecar-db-name identifier value of %q is too long (%d chars), max length for database identifiers is %d characters",
+ return sqlerror.NewSQLError(sqlerror.ERTooLongIdent, sqlerror.SSDataTooLong, "--sidecar-db-name identifier value of %q is too long (%d chars), max length for database identifiers is %d characters",
createKeyspaceOptions.SidecarDBName, len(createKeyspaceOptions.SidecarDBName), mysql.MaxIdentifierLength)
}
@@ -425,7 +427,7 @@ func init() {
CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.BaseKeyspace, "base-keyspace", "", "The base keyspace for a snapshot keyspace.")
CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SnapshotTimestamp, "snapshot-timestamp", "", "The snapshot time for a snapshot keyspace, as a timestamp in RFC3339 format.")
CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.DurabilityPolicy, "durability-policy", "none", "Type of durability to enforce for this keyspace. Default is none. Possible values include 'semi_sync' and others as dictated by registered plugins.")
- CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SidecarDBName, "sidecar-db-name", sidecardb.DefaultName, "(Experimental) Name of the Vitess sidecar database that tablets in this keyspace will use for internal metadata.")
+ CreateKeyspace.Flags().StringVar(&createKeyspaceOptions.SidecarDBName, "sidecar-db-name", sidecar.DefaultName, "(Experimental) Name of the Vitess sidecar database that tablets in this keyspace will use for internal metadata.")
Root.AddCommand(CreateKeyspace)
DeleteKeyspace.Flags().BoolVarP(&deleteKeyspaceOptions.Recursive, "recursive", "r", false, "Recursively delete all shards in the keyspace, and all tablets in those shards.")
diff --git a/go/cmd/vtctldclient/command/onlineddl.go b/go/cmd/vtctldclient/command/onlineddl.go
new file mode 100644
index 00000000000..dbe927de2bf
--- /dev/null
+++ b/go/cmd/vtctldclient/command/onlineddl.go
@@ -0,0 +1,404 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package command
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/protoutil"
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/schema"
+ "vitess.io/vitess/go/vt/vtctl/schematools"
+ "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle"
+ "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+const (
+ AllMigrationsIndicator = "all"
+)
+
+var (
+ OnlineDDL = &cobra.Command{
+ Use: "OnlineDDL [args]",
+ Short: "Operates on online DDL (schema migrations).",
+ DisableFlagsInUseLine: true,
+ Args: cobra.MinimumNArgs(2),
+ }
+ OnlineDDLCancel = &cobra.Command{
+ Use: "cancel ",
+ Short: "Cancel one or all migrations, terminating any running ones as needed.",
+ Example: "OnlineDDL cancel test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90",
+ DisableFlagsInUseLine: true,
+ Args: cobra.ExactArgs(2),
+ RunE: commandOnlineDDLCancel,
+ }
+ OnlineDDLCleanup = &cobra.Command{
+ Use: "cleanup ",
+ Short: "Mark a given schema migration ready for artifact cleanup.",
+ Example: "OnlineDDL cleanup test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90",
+ DisableFlagsInUseLine: true,
+ Args: cobra.ExactArgs(2),
+ RunE: commandOnlineDDLCleanup,
+ }
+ OnlineDDLComplete = &cobra.Command{
+ Use: "complete ",
+ Short: "Complete one or all migrations executed with --postpone-completion",
+ Example: "OnlineDDL complete test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90",
+ DisableFlagsInUseLine: true,
+ Args: cobra.ExactArgs(2),
+ RunE: commandOnlineDDLComplete,
+ }
+ OnlineDDLLaunch = &cobra.Command{
+ Use: "launch ",
+ Short: "Launch one or all migrations executed with --postpone-launch",
+ Example: "OnlineDDL launch test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90",
+ DisableFlagsInUseLine: true,
+ Args: cobra.ExactArgs(2),
+ RunE: commandOnlineDDLLaunch,
+ }
+ OnlineDDLRetry = &cobra.Command{
+ Use: "retry ",
+ Short: "Mark a given schema migration for retry.",
+ Example: "vtctl OnlineDDL retry test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90",
+ DisableFlagsInUseLine: true,
+ Args: cobra.ExactArgs(2),
+ RunE: commandOnlineDDLRetry,
+ }
+ OnlineDDLThrottle = &cobra.Command{
+ Use: "throttle ",
+ Short: "Throttles one or all migrations",
+ Example: "OnlineDDL throttle all",
+ DisableFlagsInUseLine: true,
+ Args: cobra.ExactArgs(2),
+ RunE: commandOnlineDDLThrottle,
+ }
+ OnlineDDLUnthrottle = &cobra.Command{
+ Use: "unthrottle ",
+ Short: "Unthrottles one or all migrations",
+ Example: "OnlineDDL unthrottle all",
+ DisableFlagsInUseLine: true,
+ Args: cobra.ExactArgs(2),
+ RunE: commandOnlineDDLUnthrottle,
+ }
+ OnlineDDLShow = &cobra.Command{
+ Use: "show",
+ Short: "Display information about online DDL operations.",
+ Example: `OnlineDDL show test_keyspace 82fa54ac_e83e_11ea_96b7_f875a4d24e90
+OnlineDDL show test_keyspace all
+OnlineDDL show --order descending test_keyspace all
+OnlineDDL show --limit 10 test_keyspace all
+OnlineDDL show --skip 5 --limit 10 test_keyspace all
+OnlineDDL show test_keyspace running
+OnlineDDL show test_keyspace complete
+OnlineDDL show test_keyspace failed`,
+ DisableFlagsInUseLine: true,
+ Args: cobra.RangeArgs(1, 2),
+ RunE: commandOnlineDDLShow,
+ }
+)
+
+// analyzeOnlineDDLCommandWithUuidOrAllArgument is a general helper function for OnlineDDL commands that
+// accept either a valid UUID or the "all" argument.
+func analyzeOnlineDDLCommandWithUuidOrAllArgument(cmd *cobra.Command) (keyspace, uuid string, err error) {
+ keyspace = cmd.Flags().Arg(0)
+ uuid = cmd.Flags().Arg(1)
+
+ switch {
+ case strings.ToLower(uuid) == AllMigrationsIndicator:
+ case schema.IsOnlineDDLUUID(uuid):
+ default:
+ return "", "", fmt.Errorf("argument must be 'all' or a valid UUID. Got '%s'", uuid)
+ }
+ return keyspace, uuid, nil
+}
+
+func commandOnlineDDLCancel(cmd *cobra.Command, args []string) error {
+ keyspace, uuid, err := analyzeOnlineDDLCommandWithUuidOrAllArgument(cmd)
+ if err != nil {
+ return err
+ }
+ cli.FinishedParsing(cmd)
+
+ resp, err := client.CancelSchemaMigration(commandCtx, &vtctldatapb.CancelSchemaMigrationRequest{
+ Keyspace: keyspace,
+ Uuid: uuid,
+ })
+ if err != nil {
+ return err
+ }
+
+ data, err := cli.MarshalJSON(resp)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+ return nil
+}
+
+func commandOnlineDDLCleanup(cmd *cobra.Command, args []string) error {
+ keyspace := cmd.Flags().Arg(0)
+ uuid := cmd.Flags().Arg(1)
+ if !schema.IsOnlineDDLUUID(uuid) {
+ return fmt.Errorf("%s is not a valid UUID", uuid)
+ }
+
+ cli.FinishedParsing(cmd)
+
+ resp, err := client.CleanupSchemaMigration(commandCtx, &vtctldatapb.CleanupSchemaMigrationRequest{
+ Keyspace: keyspace,
+ Uuid: uuid,
+ })
+ if err != nil {
+ return err
+ }
+
+ data, err := cli.MarshalJSON(resp)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+ return nil
+}
+
+func commandOnlineDDLComplete(cmd *cobra.Command, args []string) error {
+ keyspace, uuid, err := analyzeOnlineDDLCommandWithUuidOrAllArgument(cmd)
+ if err != nil {
+ return err
+ }
+ cli.FinishedParsing(cmd)
+
+ resp, err := client.CompleteSchemaMigration(commandCtx, &vtctldatapb.CompleteSchemaMigrationRequest{
+ Keyspace: keyspace,
+ Uuid: uuid,
+ })
+ if err != nil {
+ return err
+ }
+
+ data, err := cli.MarshalJSON(resp)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+ return nil
+}
+
+func commandOnlineDDLLaunch(cmd *cobra.Command, args []string) error {
+ keyspace, uuid, err := analyzeOnlineDDLCommandWithUuidOrAllArgument(cmd)
+ if err != nil {
+ return err
+ }
+ cli.FinishedParsing(cmd)
+
+ resp, err := client.LaunchSchemaMigration(commandCtx, &vtctldatapb.LaunchSchemaMigrationRequest{
+ Keyspace: keyspace,
+ Uuid: uuid,
+ })
+ if err != nil {
+ return err
+ }
+
+ data, err := cli.MarshalJSON(resp)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+ return nil
+}
+
+func commandOnlineDDLRetry(cmd *cobra.Command, args []string) error {
+ keyspace := cmd.Flags().Arg(0)
+ uuid := cmd.Flags().Arg(1)
+ if !schema.IsOnlineDDLUUID(uuid) {
+ return fmt.Errorf("%s is not a valid UUID", uuid)
+ }
+
+ cli.FinishedParsing(cmd)
+
+ resp, err := client.RetrySchemaMigration(commandCtx, &vtctldatapb.RetrySchemaMigrationRequest{
+ Keyspace: keyspace,
+ Uuid: uuid,
+ })
+ if err != nil {
+ return err
+ }
+
+ data, err := cli.MarshalJSON(resp)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+ return nil
+}
+
+// throttleCommandHelper is a helper function that implements the logic for both
+// commandOnlineDDLThrottle and commandOnlineDDLUnthrottle ; the only difference between the two
+// is the ThrottledApp *rule* sent in UpdateThrottlerConfigRequest.
+// input: `throttleType`: true stands for "throttle", `false` stands for "unthrottle"
+func throttleCommandHelper(cmd *cobra.Command, throttleType bool) error {
+ keyspace, uuid, err := analyzeOnlineDDLCommandWithUuidOrAllArgument(cmd)
+ if err != nil {
+ return err
+ }
+ cli.FinishedParsing(cmd)
+
+ var rule topodatapb.ThrottledAppRule
+ if throttleType {
+ rule.Ratio = throttle.DefaultThrottleRatio
+ rule.ExpiresAt = protoutil.TimeToProto(time.Now().Add(throttle.DefaultAppThrottleDuration))
+ } else {
+ rule.Ratio = 0
+ rule.ExpiresAt = protoutil.TimeToProto(time.Now())
+ }
+
+ if strings.ToLower(uuid) == AllMigrationsIndicator {
+ rule.Name = throttlerapp.OnlineDDLName.String()
+ } else {
+ rule.Name = uuid
+ }
+
+ updateThrottlerConfigOptions := vtctldatapb.UpdateThrottlerConfigRequest{
+ Keyspace: keyspace,
+ ThrottledApp: &rule,
+ }
+ resp, err := client.UpdateThrottlerConfig(commandCtx, &updateThrottlerConfigOptions)
+ if err != nil {
+ return err
+ }
+
+ data, err := cli.MarshalJSON(resp)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+ return nil
+}
+
+// commandOnlineDDLThrottle throttles one or multiple migrations.
+// As opposed to *most* OnlineDDL functions, this functionality does not end up calling a gRPC on tablets.
+// Instead, it updates Keyspace and SrvKeyspace entries, on which the tablets listen.
+func commandOnlineDDLThrottle(cmd *cobra.Command, args []string) error {
+ return throttleCommandHelper(cmd, true)
+}
+
+// commandOnlineDDLUnthrottle unthrottles one or multiple migrations.
+// As opposed to *most* OnlineDDL functions, this functionality does not end up calling a gRPC on tablets.
+// Instead, it updates Keyspace and SrvKeyspace entries, on which the tablets listen.
+func commandOnlineDDLUnthrottle(cmd *cobra.Command, args []string) error {
+ return throttleCommandHelper(cmd, false)
+}
+
+var onlineDDLShowArgs = struct {
+ JSON bool
+ OrderStr string
+ Limit uint64
+ Skip uint64
+}{
+ OrderStr: "ascending",
+}
+
+func commandOnlineDDLShow(cmd *cobra.Command, args []string) error {
+ var order vtctldatapb.QueryOrdering
+ switch strings.ToLower(onlineDDLShowArgs.OrderStr) {
+ case "":
+ order = vtctldatapb.QueryOrdering_NONE
+ case "asc", "ascending":
+ order = vtctldatapb.QueryOrdering_ASCENDING
+ case "desc", "descending":
+ order = vtctldatapb.QueryOrdering_DESCENDING
+ default:
+ return fmt.Errorf("invalid ordering %s (choices are 'asc', 'ascending', 'desc', 'descending')", onlineDDLShowArgs.OrderStr)
+ }
+
+ cli.FinishedParsing(cmd)
+
+ req := &vtctldatapb.GetSchemaMigrationsRequest{
+ Keyspace: cmd.Flags().Arg(0),
+ Order: order,
+ Limit: onlineDDLShowArgs.Limit,
+ Skip: onlineDDLShowArgs.Skip,
+ }
+
+ switch arg := cmd.Flags().Arg(1); arg {
+ case "", "all":
+ case "recent":
+ req.Recent = protoutil.DurationToProto(7 * 24 * time.Hour)
+ default:
+ if status, err := schematools.ParseSchemaMigrationStatus(arg); err == nil {
+ // Argument is a status name.
+ req.Status = status
+ } else if schema.IsOnlineDDLUUID(arg) {
+ req.Uuid = arg
+ } else {
+ req.MigrationContext = arg
+ }
+ }
+
+ resp, err := client.GetSchemaMigrations(commandCtx, req)
+ if err != nil {
+ return err
+ }
+
+ switch {
+ case onlineDDLShowArgs.JSON:
+ data, err := cli.MarshalJSON(resp)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("%s\n", data)
+ default:
+ res, err := sqltypes.MarshalResult(schematools.MarshallableSchemaMigrations(resp.Migrations))
+ if err != nil {
+ return err
+ }
+
+ cli.WriteQueryResultTable(os.Stdout, res)
+ }
+ return nil
+}
+
+func init() {
+ OnlineDDL.AddCommand(OnlineDDLCancel)
+ OnlineDDL.AddCommand(OnlineDDLCleanup)
+ OnlineDDL.AddCommand(OnlineDDLComplete)
+ OnlineDDL.AddCommand(OnlineDDLLaunch)
+ OnlineDDL.AddCommand(OnlineDDLRetry)
+ OnlineDDL.AddCommand(OnlineDDLThrottle)
+ OnlineDDL.AddCommand(OnlineDDLUnthrottle)
+
+ OnlineDDLShow.Flags().BoolVar(&onlineDDLShowArgs.JSON, "json", false, "Output JSON instead of human-readable table.")
+ OnlineDDLShow.Flags().StringVar(&onlineDDLShowArgs.OrderStr, "order", "asc", "Sort the results by `id` property of the Schema migration.")
+ OnlineDDLShow.Flags().Uint64Var(&onlineDDLShowArgs.Limit, "limit", 0, "Limit number of rows returned in output.")
+ OnlineDDLShow.Flags().Uint64Var(&onlineDDLShowArgs.Skip, "skip", 0, "Skip specified number of rows returned in output.")
+
+ OnlineDDL.AddCommand(OnlineDDLShow)
+ Root.AddCommand(OnlineDDL)
+}
diff --git a/go/cmd/vtctldclient/command/reparents.go b/go/cmd/vtctldclient/command/reparents.go
index 6483d699457..5c83016701a 100644
--- a/go/cmd/vtctldclient/command/reparents.go
+++ b/go/cmd/vtctldclient/command/reparents.go
@@ -94,6 +94,7 @@ var emergencyReparentShardOptions = struct {
NewPrimaryAliasStr string
IgnoreReplicaAliasStrList []string
PreventCrossCellPromotion bool
+ WaitForAllTablets bool
}{}
func commandEmergencyReparentShard(cmd *cobra.Command, args []string) error {
@@ -132,6 +133,7 @@ func commandEmergencyReparentShard(cmd *cobra.Command, args []string) error {
IgnoreReplicas: ignoreReplicaAliases,
WaitReplicasTimeout: protoutil.DurationToProto(emergencyReparentShardOptions.WaitReplicasTimeout),
PreventCrossCellPromotion: emergencyReparentShardOptions.PreventCrossCellPromotion,
+ WaitForAllTablets: emergencyReparentShardOptions.WaitForAllTablets,
})
if err != nil {
return err
@@ -281,6 +283,7 @@ func init() {
EmergencyReparentShard.Flags().DurationVar(&emergencyReparentShardOptions.WaitReplicasTimeout, "wait-replicas-timeout", topo.RemoteOperationTimeout, "Time to wait for replicas to catch up in reparenting.")
EmergencyReparentShard.Flags().StringVar(&emergencyReparentShardOptions.NewPrimaryAliasStr, "new-primary", "", "Alias of a tablet that should be the new primary. If not specified, the vtctld will select the best candidate to promote.")
EmergencyReparentShard.Flags().BoolVar(&emergencyReparentShardOptions.PreventCrossCellPromotion, "prevent-cross-cell-promotion", false, "Only promotes a new primary from the same cell as the previous primary.")
+ EmergencyReparentShard.Flags().BoolVar(&emergencyReparentShardOptions.WaitForAllTablets, "wait-for-all-tablets", false, "Should ERS wait for all the tablets to respond. Useful when all the tablets are reachable.")
EmergencyReparentShard.Flags().StringSliceVarP(&emergencyReparentShardOptions.IgnoreReplicaAliasStrList, "ignore-replicas", "i", nil, "Comma-separated, repeated list of replica tablet aliases to ignore during the emergency reparent.")
Root.AddCommand(EmergencyReparentShard)
diff --git a/go/cmd/vtctldclient/command/root.go b/go/cmd/vtctldclient/command/root.go
index 9e59276993c..1194b49ec8f 100644
--- a/go/cmd/vtctldclient/command/root.go
+++ b/go/cmd/vtctldclient/command/root.go
@@ -30,19 +30,33 @@ import (
"vitess.io/vitess/go/vt/logutil"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/vtctl/vtctldclient"
+
+ // These imports ensure init()s within them get called and they register their commands/subcommands.
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+ vreplcommon "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+ _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/lookupvindex"
+ _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/materialize"
+ _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/migrate"
+ _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/mount"
+ _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/movetables"
+ _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/reshard"
+ _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/vdiff"
+ _ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/workflow"
)
var (
// VtctldClientProtocol is the protocol to use when creating the vtctldclient.VtctldClient.
VtctldClientProtocol = "grpc"
- client vtctldclient.VtctldClient
- traceCloser io.Closer
+ client vtctldclient.VtctldClient
+ traceCloser io.Closer
+
commandCtx context.Context
commandCancel func()
server string
actionTimeout time.Duration
+ compactOutput bool
// Root is the main entrypoint to the vtctldclient CLI.
Root = &cobra.Command{
@@ -59,6 +73,11 @@ var (
ctx = context.Background()
}
commandCtx, commandCancel = context.WithTimeout(ctx, actionTimeout)
+ if compactOutput {
+ cli.DefaultMarshalOptions.EmitUnpopulated = false
+ }
+ vreplcommon.SetClient(client)
+ vreplcommon.SetCommandCtx(commandCtx)
return err
},
// Similarly, PersistentPostRun cleans up the resources spawned by
@@ -122,6 +141,13 @@ func getClientForCommand(cmd *cobra.Command) (vtctldclient.VtctldClient, error)
}
}
+ // Reserved cobra commands for shell completion that we don't want to fail
+ // here.
+ switch {
+ case cmd.Name() == "__complete", cmd.Parent() != nil && cmd.Parent().Name() == "completion":
+ return nil, nil
+ }
+
if VtctldClientProtocol != "local" && server == "" {
return nil, errNoServer
}
@@ -130,6 +156,8 @@ func getClientForCommand(cmd *cobra.Command) (vtctldclient.VtctldClient, error)
}
func init() {
- Root.PersistentFlags().StringVar(&server, "server", "", "server to use for connection (required)")
- Root.PersistentFlags().DurationVar(&actionTimeout, "action_timeout", time.Hour, "timeout for the total command")
+ Root.PersistentFlags().StringVar(&server, "server", "", "server to use for the connection (required)")
+ Root.PersistentFlags().DurationVar(&actionTimeout, "action_timeout", time.Hour, "timeout to use for the command")
+ Root.PersistentFlags().BoolVar(&compactOutput, "compact", false, "use compact format for otherwise verbose outputs")
+ vreplcommon.RegisterCommands(Root)
}
diff --git a/go/cmd/vtctldclient/command/schema.go b/go/cmd/vtctldclient/command/schema.go
index a2d7843756d..795b1315e89 100644
--- a/go/cmd/vtctldclient/command/schema.go
+++ b/go/cmd/vtctldclient/command/schema.go
@@ -31,7 +31,7 @@ import (
"vitess.io/vitess/go/vt/schema"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/topo/topoproto"
- "vitess.io/vitess/go/vt/wrangler"
+ "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver"
vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
"vitess.io/vitess/go/vt/proto/vtrpc"
@@ -40,7 +40,7 @@ import (
var (
// ApplySchema makes an ApplySchema gRPC call to a vtctld.
ApplySchema = &cobra.Command{
- Use: "ApplySchema [--allow-long-unavailability] [--ddl-strategy ] [--uuid ...] [--migration-context ] [--wait-replicas-timeout ] [--caller-id ] {--sql-file | --sql } ",
+ Use: "ApplySchema [--ddl-strategy ] [--uuid ...] [--migration-context ] [--wait-replicas-timeout ] [--caller-id ] {--sql-file | --sql } ",
Short: "Applies the schema change to the specified keyspace on every primary, running in parallel on all shards. The changes are then propagated to replicas via replication.",
Long: `Applies the schema change to the specified keyspace on every primary, running in parallel on all shards. The changes are then propagated to replicas via replication.
@@ -103,6 +103,7 @@ var applySchemaOptions = struct {
WaitReplicasTimeout time.Duration
SkipPreflight bool
CallerID string
+ BatchSize int64
}{}
func commandApplySchema(cmd *cobra.Command, args []string) error {
@@ -137,15 +138,14 @@ func commandApplySchema(cmd *cobra.Command, args []string) error {
ks := cmd.Flags().Arg(0)
resp, err := client.ApplySchema(commandCtx, &vtctldatapb.ApplySchemaRequest{
- Keyspace: ks,
- AllowLongUnavailability: applySchemaOptions.AllowLongUnavailability,
- DdlStrategy: applySchemaOptions.DDLStrategy,
- Sql: parts,
- SkipPreflight: true,
- UuidList: applySchemaOptions.UUIDList,
- MigrationContext: applySchemaOptions.MigrationContext,
- WaitReplicasTimeout: protoutil.DurationToProto(applySchemaOptions.WaitReplicasTimeout),
- CallerId: cid,
+ Keyspace: ks,
+ DdlStrategy: applySchemaOptions.DDLStrategy,
+ Sql: parts,
+ UuidList: applySchemaOptions.UUIDList,
+ MigrationContext: applySchemaOptions.MigrationContext,
+ WaitReplicasTimeout: protoutil.DurationToProto(applySchemaOptions.WaitReplicasTimeout),
+ CallerId: cid,
+ BatchSize: applySchemaOptions.BatchSize,
})
if err != nil {
return err
@@ -286,15 +286,16 @@ func commandReloadSchemaShard(cmd *cobra.Command, args []string) error {
}
func init() {
- ApplySchema.Flags().MarkDeprecated("--skip-preflight", "Deprecated. Assumed to be always 'true'")
- ApplySchema.Flags().BoolVar(&applySchemaOptions.AllowLongUnavailability, "allow-long-unavailability", false, "Allow large schema changes which incur a longer unavailability of the database.")
+ ApplySchema.Flags().Bool("allow-long-unavailability", false, "Deprecated and has no effect.")
+ ApplySchema.Flags().MarkDeprecated("--allow-long-unavailability", "")
ApplySchema.Flags().StringVar(&applySchemaOptions.DDLStrategy, "ddl-strategy", string(schema.DDLStrategyDirect), "Online DDL strategy, compatible with @@ddl_strategy session variable (examples: 'gh-ost', 'pt-osc', 'gh-ost --max-load=Threads_running=100'.")
ApplySchema.Flags().StringSliceVar(&applySchemaOptions.UUIDList, "uuid", nil, "Optional, comma-delimited, repeatable, explicit UUIDs for migration. If given, must match number of DDL changes.")
ApplySchema.Flags().StringVar(&applySchemaOptions.MigrationContext, "migration-context", "", "For Online DDL, optionally supply a custom unique string used as context for the migration(s) in this command. By default a unique context is auto-generated by Vitess.")
- ApplySchema.Flags().DurationVar(&applySchemaOptions.WaitReplicasTimeout, "wait-replicas-timeout", wrangler.DefaultWaitReplicasTimeout, "Amount of time to wait for replicas to receive the schema change via replication.")
+ ApplySchema.Flags().DurationVar(&applySchemaOptions.WaitReplicasTimeout, "wait-replicas-timeout", grpcvtctldserver.DefaultWaitReplicasTimeout, "Amount of time to wait for replicas to receive the schema change via replication.")
ApplySchema.Flags().StringVar(&applySchemaOptions.CallerID, "caller-id", "", "Effective caller ID used for the operation and should map to an ACL name which grants this identity the necessary permissions to perform the operation (this is only necessary when strict table ACLs are used).")
ApplySchema.Flags().StringArrayVar(&applySchemaOptions.SQL, "sql", nil, "Semicolon-delimited, repeatable SQL commands to apply. Exactly one of --sql|--sql-file is required.")
ApplySchema.Flags().StringVar(&applySchemaOptions.SQLFile, "sql-file", "", "Path to a file containing semicolon-delimited SQL commands to apply. Exactly one of --sql|--sql-file is required.")
+ ApplySchema.Flags().Int64Var(&applySchemaOptions.BatchSize, "batch-size", 0, "How many queries to batch together. Only applicable when all queries are CREATE TABLE|VIEW")
Root.AddCommand(ApplySchema)
diff --git a/go/cmd/vtctldclient/command/throttler.go b/go/cmd/vtctldclient/command/throttler.go
index b0dbd663013..9783f76720d 100644
--- a/go/cmd/vtctldclient/command/throttler.go
+++ b/go/cmd/vtctldclient/command/throttler.go
@@ -17,17 +17,23 @@ limitations under the License.
package command
import (
+ "fmt"
+ "time"
+
"github.com/spf13/cobra"
"vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/protoutil"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+ "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle"
)
var (
// UpdateThrottlerConfig makes a UpdateThrottlerConfig gRPC call to a vtctld.
UpdateThrottlerConfig = &cobra.Command{
- Use: "UpdateThrottlerConfig [--enable|--disable] [--threshold=] [--custom-query=] [--check-as-check-self|--check-as-check-shard] ",
+ Use: "UpdateThrottlerConfig [--enable|--disable] [--threshold=] [--custom-query=] [--check-as-check-self|--check-as-check-shard] [--throttle-app|unthrottle-app=] [--throttle-app-ratio=] [--throttle-app-duration=] ",
Short: "Update the tablet throttler configuration for all tablets in the given keyspace (across all cells)",
DisableFlagsInUseLine: true,
Args: cobra.ExactArgs(1),
@@ -35,14 +41,32 @@ var (
}
)
-var updateThrottlerConfigOptions vtctldatapb.UpdateThrottlerConfigRequest
+var (
+ updateThrottlerConfigOptions vtctldatapb.UpdateThrottlerConfigRequest
+ throttledAppRule topodatapb.ThrottledAppRule
+ unthrottledAppRule topodatapb.ThrottledAppRule
+ throttledAppDuration time.Duration
+)
func commandUpdateThrottlerConfig(cmd *cobra.Command, args []string) error {
keyspace := cmd.Flags().Arg(0)
cli.FinishedParsing(cmd)
+ if throttledAppRule.Name != "" && unthrottledAppRule.Name != "" {
+ return fmt.Errorf("throttle-app and unthrottle-app are mutually exclusive")
+ }
+
updateThrottlerConfigOptions.CustomQuerySet = cmd.Flags().Changed("custom-query")
updateThrottlerConfigOptions.Keyspace = keyspace
+
+ if throttledAppRule.Name != "" {
+ throttledAppRule.ExpiresAt = protoutil.TimeToProto(time.Now().Add(throttledAppDuration))
+ updateThrottlerConfigOptions.ThrottledApp = &throttledAppRule
+ } else if unthrottledAppRule.Name != "" {
+ unthrottledAppRule.ExpiresAt = protoutil.TimeToProto(time.Now())
+ updateThrottlerConfigOptions.ThrottledApp = &unthrottledAppRule
+ }
+
_, err := client.UpdateThrottlerConfig(commandCtx, &updateThrottlerConfigOptions)
if err != nil {
return err
@@ -57,5 +81,12 @@ func init() {
UpdateThrottlerConfig.Flags().StringVar(&updateThrottlerConfigOptions.CustomQuery, "custom-query", "", "custom throttler check query")
UpdateThrottlerConfig.Flags().BoolVar(&updateThrottlerConfigOptions.CheckAsCheckSelf, "check-as-check-self", false, "/throttler/check requests behave as is /throttler/check-self was called")
UpdateThrottlerConfig.Flags().BoolVar(&updateThrottlerConfigOptions.CheckAsCheckShard, "check-as-check-shard", false, "use standard behavior for /throttler/check requests")
+
+ UpdateThrottlerConfig.Flags().StringVar(&unthrottledAppRule.Name, "unthrottle-app", "", "an app name to unthrottle")
+ UpdateThrottlerConfig.Flags().StringVar(&throttledAppRule.Name, "throttle-app", "", "an app name to throttle")
+ UpdateThrottlerConfig.Flags().Float64Var(&throttledAppRule.Ratio, "throttle-app-ratio", throttle.DefaultThrottleRatio, "ratio to throttle app (app specififed in --throttled-app)")
+ UpdateThrottlerConfig.Flags().DurationVar(&throttledAppDuration, "throttle-app-duration", throttle.DefaultAppThrottleDuration, "duration after which throttled app rule expires (app specififed in --throttled-app)")
+ UpdateThrottlerConfig.Flags().BoolVar(&throttledAppRule.Exempt, "throttle-app-exempt", throttledAppRule.Exempt, "exempt this app from being at all throttled. WARNING: use with extreme care, as this is likely to push metrics beyond the throttler's threshold, and starve other apps")
+
Root.AddCommand(UpdateThrottlerConfig)
}
diff --git a/go/cmd/vtctldclient/command/vreplication/common/cancel.go b/go/cmd/vtctldclient/command/vreplication/common/cancel.go
new file mode 100644
index 00000000000..48abcc89584
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/common/cancel.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var CancelOptions = struct {
+ KeepData bool
+ KeepRoutingRules bool
+}{}
+
+func GetCancelCommand(opts *SubCommandsOpts) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "cancel",
+ Short: fmt.Sprintf("Cancel a %s VReplication workflow.", opts.SubCommand),
+ Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer cancel`, opts.SubCommand, opts.Workflow),
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Cancel"},
+ Args: cobra.NoArgs,
+ RunE: commandCancel,
+ }
+ return cmd
+}
+
+func commandCancel(cmd *cobra.Command, args []string) error {
+ format, err := GetOutputFormat(cmd)
+ if err != nil {
+ return err
+ }
+
+ cli.FinishedParsing(cmd)
+
+ req := &vtctldatapb.WorkflowDeleteRequest{
+ Keyspace: BaseOptions.TargetKeyspace,
+ Workflow: BaseOptions.Workflow,
+ KeepData: CancelOptions.KeepData,
+ KeepRoutingRules: CancelOptions.KeepRoutingRules,
+ }
+ resp, err := GetClient().WorkflowDelete(GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+
+ var output []byte
+ if format == "json" {
+ // Sort the inner TabletInfo slice for deterministic output.
+ sort.Slice(resp.Details, func(i, j int) bool {
+ return resp.Details[i].Tablet.String() < resp.Details[j].Tablet.String()
+ })
+ output, err = cli.MarshalJSONPretty(resp)
+ if err != nil {
+ return err
+ }
+ } else {
+ output = []byte(resp.Summary + "\n")
+ }
+ fmt.Printf("%s\n", output)
+
+ return nil
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/common/complete.go b/go/cmd/vtctldclient/command/vreplication/common/complete.go
new file mode 100644
index 00000000000..6e210b188fe
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/common/complete.go
@@ -0,0 +1,75 @@
+package common
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var CompleteOptions = struct {
+ KeepData bool
+ KeepRoutingRules bool
+ RenameTables bool
+ DryRun bool
+}{}
+
+func GetCompleteCommand(opts *SubCommandsOpts) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "complete",
+ Short: fmt.Sprintf("Complete a %s VReplication workflow.", opts.SubCommand),
+ Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer complete`,
+ opts.SubCommand, opts.Workflow),
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Complete"},
+ Args: cobra.NoArgs,
+ RunE: commandComplete,
+ }
+ return cmd
+}
+
+func commandComplete(cmd *cobra.Command, args []string) error {
+ format, err := GetOutputFormat(cmd)
+ if err != nil {
+ return err
+ }
+ cli.FinishedParsing(cmd)
+
+ req := &vtctldatapb.MoveTablesCompleteRequest{
+ Workflow: BaseOptions.Workflow,
+ TargetKeyspace: BaseOptions.TargetKeyspace,
+ KeepData: CompleteOptions.KeepData,
+ KeepRoutingRules: CompleteOptions.KeepRoutingRules,
+ RenameTables: CompleteOptions.RenameTables,
+ DryRun: CompleteOptions.DryRun,
+ }
+ resp, err := GetClient().MoveTablesComplete(GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+
+ var output []byte
+ if format == "json" {
+ output, err = cli.MarshalJSONPretty(resp)
+ if err != nil {
+ return err
+ }
+ } else {
+ tout := bytes.Buffer{}
+ tout.WriteString(resp.Summary + "\n")
+ if len(resp.DryRunResults) > 0 {
+ tout.WriteString("\n")
+ for _, r := range resp.DryRunResults {
+ tout.WriteString(r + "\n")
+ }
+ }
+ output = tout.Bytes()
+ }
+ fmt.Println(string(output))
+
+ return nil
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/common/show.go b/go/cmd/vtctldclient/command/vreplication/common/show.go
new file mode 100644
index 00000000000..71e6675f690
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/common/show.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var showOptions = struct {
+ IncludeLogs bool
+}{}
+
+func GetShowCommand(opts *SubCommandsOpts) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "show",
+ Short: fmt.Sprintf("Show the details for a %s VReplication workflow.", opts.SubCommand),
+ Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer show`, opts.SubCommand, opts.Workflow),
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Show"},
+ Args: cobra.NoArgs,
+ RunE: commandShow,
+ }
+ cmd.Flags().BoolVar(&showOptions.IncludeLogs, "include-logs", true, "Include recent logs for the workflow.")
+ return cmd
+}
+
+func commandShow(cmd *cobra.Command, args []string) error {
+ cli.FinishedParsing(cmd)
+
+ req := &vtctldatapb.GetWorkflowsRequest{
+ Keyspace: BaseOptions.TargetKeyspace,
+ Workflow: BaseOptions.Workflow,
+ IncludeLogs: showOptions.IncludeLogs,
+ }
+ resp, err := GetClient().GetWorkflows(GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+
+ data, err := cli.MarshalJSONPretty(resp)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+
+ return nil
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/common/status.go b/go/cmd/vtctldclient/command/vreplication/common/status.go
new file mode 100644
index 00000000000..ad038c42536
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/common/status.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+func GetStatusCommand(opts *SubCommandsOpts) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "status",
+ Short: fmt.Sprintf("Show the current status for a %s VReplication workflow.", opts.SubCommand),
+ Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer status`, opts.SubCommand, opts.Workflow),
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Status", "progress", "Progress"},
+ Args: cobra.NoArgs,
+ RunE: commandStatus,
+ }
+ return cmd
+}
+
+func commandStatus(cmd *cobra.Command, args []string) error {
+ format, err := GetOutputFormat(cmd)
+ if err != nil {
+ return err
+ }
+ cli.FinishedParsing(cmd)
+
+ req := &vtctldatapb.WorkflowStatusRequest{
+ Keyspace: BaseOptions.TargetKeyspace,
+ Workflow: BaseOptions.Workflow,
+ }
+ resp, err := GetClient().WorkflowStatus(GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+
+ if err = OutputStatusResponse(resp, format); err != nil {
+ return err
+ }
+
+ return nil
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/common/switchtraffic.go b/go/cmd/vtctldclient/command/vreplication/common/switchtraffic.go
new file mode 100644
index 00000000000..019367fe82b
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/common/switchtraffic.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/protoutil"
+ "vitess.io/vitess/go/vt/vtctl/workflow"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+func GetSwitchTrafficCommand(opts *SubCommandsOpts) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "switchtraffic",
+ Short: fmt.Sprintf("Switch traffic for a %s VReplication workflow.", opts.SubCommand),
+ Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer switchtraffic --tablet-types "replica,rdonly"`, opts.SubCommand, opts.Workflow),
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"SwitchTraffic"},
+ Args: cobra.NoArgs,
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ SwitchTrafficOptions.Direction = workflow.DirectionForward
+ if !cmd.Flags().Lookup("tablet-types").Changed {
+ // We switch traffic for all tablet types if none are provided.
+ SwitchTrafficOptions.TabletTypes = []topodatapb.TabletType{
+ topodatapb.TabletType_PRIMARY,
+ topodatapb.TabletType_REPLICA,
+ topodatapb.TabletType_RDONLY,
+ }
+ }
+ return nil
+ },
+ RunE: commandSwitchTraffic,
+ }
+ return cmd
+}
+
+func GetReverseTrafficCommand(opts *SubCommandsOpts) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "reversetraffic",
+ Short: fmt.Sprintf("Reverse traffic for a %s VReplication workflow.", opts.SubCommand),
+ Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer reversetraffic`, opts.SubCommand, opts.Workflow),
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"ReverseTraffic"},
+ Args: cobra.NoArgs,
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ SwitchTrafficOptions.Direction = workflow.DirectionBackward
+ if !cmd.Flags().Lookup("tablet-types").Changed {
+ // We switch traffic for all tablet types if none are provided.
+ SwitchTrafficOptions.TabletTypes = []topodatapb.TabletType{
+ topodatapb.TabletType_PRIMARY,
+ topodatapb.TabletType_REPLICA,
+ topodatapb.TabletType_RDONLY,
+ }
+ }
+ return nil
+ },
+ RunE: commandSwitchTraffic,
+ }
+ return cmd
+}
+
+func commandSwitchTraffic(cmd *cobra.Command, args []string) error {
+ format, err := GetOutputFormat(cmd)
+ if err != nil {
+ return err
+ }
+
+ cli.FinishedParsing(cmd)
+
+ req := &vtctldatapb.WorkflowSwitchTrafficRequest{
+ Keyspace: BaseOptions.TargetKeyspace,
+ Workflow: BaseOptions.Workflow,
+ TabletTypes: SwitchTrafficOptions.TabletTypes,
+ MaxReplicationLagAllowed: protoutil.DurationToProto(SwitchTrafficOptions.MaxReplicationLagAllowed),
+ Timeout: protoutil.DurationToProto(SwitchTrafficOptions.Timeout),
+ DryRun: SwitchTrafficOptions.DryRun,
+ EnableReverseReplication: SwitchTrafficOptions.EnableReverseReplication,
+ InitializeTargetSequences: SwitchTrafficOptions.InitializeTargetSequences,
+ Direction: int32(SwitchTrafficOptions.Direction),
+ }
+ resp, err := GetClient().WorkflowSwitchTraffic(GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+
+ var output []byte
+ if format == "json" {
+ output, err = cli.MarshalJSONPretty(resp)
+ if err != nil {
+ return err
+ }
+ } else {
+ tout := bytes.Buffer{}
+ tout.WriteString(resp.Summary + "\n\n")
+ if req.DryRun {
+ for _, line := range resp.DryRunResults {
+ tout.WriteString(line + "\n")
+ }
+ } else {
+ tout.WriteString(fmt.Sprintf("Start State: %s\n", resp.StartState))
+ tout.WriteString(fmt.Sprintf("Current State: %s\n", resp.CurrentState))
+ }
+ output = tout.Bytes()
+ }
+ fmt.Printf("%s\n", output)
+
+ return nil
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/common/update.go b/go/cmd/vtctldclient/command/vreplication/common/update.go
new file mode 100644
index 00000000000..7875c9412ac
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/common/update.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "vitess.io/vitess/go/vt/proto/vtrpc"
+ "vitess.io/vitess/go/vt/vterrors"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/textutil"
+
+ binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+func bridgeToWorkflow(cmd *cobra.Command, args []string) {
+ workflowUpdateOptions.Workflow = BaseOptions.Workflow
+ workflowOptions.Keyspace = BaseOptions.TargetKeyspace
+}
+
+var (
+ workflowOptions = struct {
+ Keyspace string
+ }{}
+
+ workflowUpdateOptions = struct {
+ Workflow string
+ Cells []string
+ TabletTypes []topodatapb.TabletType
+ TabletTypesInPreferenceOrder bool
+ OnDDL string
+ }{}
+)
+
+func GetStartCommand(opts *SubCommandsOpts) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "start",
+ Short: fmt.Sprintf("Start a %s workflow.", opts.SubCommand),
+ Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer start`, opts.SubCommand, opts.Workflow),
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Start"},
+ Args: cobra.NoArgs,
+ PreRun: bridgeToWorkflow,
+ RunE: commandUpdateState,
+ }
+ return cmd
+}
+
+func GetStopCommand(opts *SubCommandsOpts) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "stop",
+ Short: fmt.Sprintf("Stop a %s workflow.", opts.SubCommand),
+ Example: fmt.Sprintf(`vtctldclient --server localhost:15999 %s --workflow %s --target-keyspace customer stop`, opts.SubCommand, opts.Workflow),
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Stop"},
+ Args: cobra.NoArgs,
+ PreRun: bridgeToWorkflow,
+ RunE: commandUpdateState,
+ }
+ return cmd
+}
+
+func getWorkflow(keyspace, workflow string) (*vtctldatapb.GetWorkflowsResponse, error) {
+ resp, err := GetClient().GetWorkflows(GetCommandCtx(), &vtctldatapb.GetWorkflowsRequest{
+ Keyspace: keyspace,
+ Workflow: workflow,
+ })
+ if err != nil {
+ return &vtctldatapb.GetWorkflowsResponse{}, err
+ }
+ return resp, nil
+}
+
+// CanRestartWorkflow validates that, for an atomic copy workflow, none of the streams are still in the copy phase.
+// Since we copy all tables in a single snapshot, we cannot restart a workflow which broke before all tables were copied.
+func CanRestartWorkflow(keyspace, workflow string) error {
+ resp, err := getWorkflow(keyspace, workflow)
+ if err != nil {
+ return err
+ }
+ if len(resp.Workflows) == 0 {
+ return fmt.Errorf("workflow %s not found", workflow)
+ }
+ if len(resp.Workflows) > 1 {
+ return vterrors.Errorf(vtrpc.Code_INTERNAL, "multiple results found for workflow %s", workflow)
+ }
+ wf := resp.Workflows[0]
+ if wf.WorkflowSubType != binlogdatapb.VReplicationWorkflowSubType_AtomicCopy.String() {
+ return nil
+ }
+ // If we're here, we have an atomic copy workflow.
+ for _, shardStream := range wf.ShardStreams {
+ for _, stream := range shardStream.Streams {
+ if len(stream.CopyStates) > 0 {
+ return fmt.Errorf("stream %d is still in the copy phase: can only start workflow %s if all streams have completed the copy phase", stream.Id, workflow)
+ }
+ }
+ }
+ return nil
+}
+
+func commandUpdateState(cmd *cobra.Command, args []string) error {
+ cli.FinishedParsing(cmd)
+
+ var state binlogdatapb.VReplicationWorkflowState
+ switch strings.ToLower(cmd.Name()) {
+ case "start":
+ if err := CanRestartWorkflow(workflowOptions.Keyspace, workflowUpdateOptions.Workflow); err != nil {
+ return err
+ }
+ state = binlogdatapb.VReplicationWorkflowState_Running
+ case "stop":
+ state = binlogdatapb.VReplicationWorkflowState_Stopped
+ default:
+ return fmt.Errorf("invalid workflow state: %s", args[0])
+ }
+
+ // The only thing we're updating is the state.
+ req := &vtctldatapb.WorkflowUpdateRequest{
+ Keyspace: workflowOptions.Keyspace,
+ TabletRequest: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{
+ Workflow: workflowUpdateOptions.Workflow,
+ Cells: textutil.SimulatedNullStringSlice,
+ TabletTypes: []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)},
+ OnDdl: binlogdatapb.OnDDLAction(textutil.SimulatedNullInt),
+ State: state,
+ },
+ }
+
+ resp, err := GetClient().WorkflowUpdate(GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+
+ // Sort the inner TabletInfo slice for deterministic output.
+ sort.Slice(resp.Details, func(i, j int) bool {
+ return resp.Details[i].Tablet.String() < resp.Details[j].Tablet.String()
+ })
+
+ data, err := cli.MarshalJSONPretty(resp)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+
+ return nil
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/common/utils.go b/go/cmd/vtctldclient/command/vreplication/common/utils.go
new file mode 100644
index 00000000000..da6e3329579
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/common/utils.go
@@ -0,0 +1,245 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/topoproto"
+ "vitess.io/vitess/go/vt/vtctl/vtctldclient"
+ "vitess.io/vitess/go/vt/vtctl/workflow"
+
+ binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var (
+ client vtctldclient.VtctldClient
+ commandCtx context.Context
+ // The generic default for most commands.
+ tabletTypesDefault = []topodatapb.TabletType{
+ topodatapb.TabletType_REPLICA,
+ topodatapb.TabletType_PRIMARY,
+ }
+ onDDLDefault = binlogdatapb.OnDDLAction_IGNORE.String()
+ MaxReplicationLagDefault = 30 * time.Second
+ TimeoutDefault = 30 * time.Second
+
+ BaseOptions = struct {
+ Workflow string
+ TargetKeyspace string
+ Format string
+ }{}
+
+ CreateOptions = struct {
+ Cells []string
+ AllCells bool
+ TabletTypes []topodatapb.TabletType
+ TabletTypesInPreferenceOrder bool
+ OnDDL string
+ DeferSecondaryKeys bool
+ AutoStart bool
+ StopAfterCopy bool
+ }{}
+)
+
+var commandHandlers = make(map[string]func(cmd *cobra.Command))
+
+func RegisterCommandHandler(command string, handler func(cmd *cobra.Command)) {
+ commandHandlers[command] = handler
+}
+
+func RegisterCommands(root *cobra.Command) {
+ for _, handler := range commandHandlers {
+ handler(root)
+ }
+}
+
+type SubCommandsOpts struct {
+ SubCommand string
+ Workflow string // Used to specify an example workflow name for the Examples section of the help output.
+}
+
+func SetClient(c vtctldclient.VtctldClient) {
+ client = c
+}
+
+func GetClient() vtctldclient.VtctldClient {
+ return client
+}
+
+func SetCommandCtx(ctx context.Context) {
+ commandCtx = ctx
+}
+
+func GetCommandCtx() context.Context {
+ return commandCtx
+}
+
+// ParseCells validates the cells and all-cells flags: the two are mutually
+// exclusive, provided cell names are whitespace-trimmed, and all-cells
+// expands to the full list of cells currently known in the topo server.
+func ParseCells(cmd *cobra.Command) error {
+	cf := cmd.Flags().Lookup("cells")
+	af := cmd.Flags().Lookup("all-cells")
+	if cf != nil && cf.Changed && af != nil && af.Changed {
+		return fmt.Errorf("cannot specify both --cells and --all-cells")
+	}
+	// Guard against a nil flag before dereferencing: a command that did not
+	// register --cells would otherwise cause a nil-pointer panic here.
+	if cf != nil && cf.Changed { // Validate the provided value(s)
+		for i, cell := range CreateOptions.Cells { // Which only means trimming whitespace
+			CreateOptions.Cells[i] = strings.TrimSpace(cell)
+		}
+	}
+	if CreateOptions.AllCells { // Use all current cells
+		ctx, cancel := context.WithTimeout(commandCtx, topo.RemoteOperationTimeout)
+		defer cancel()
+		resp, err := client.GetCellInfoNames(ctx, &vtctldatapb.GetCellInfoNamesRequest{})
+		if err != nil {
+			return fmt.Errorf("failed to get current cells: %v", err)
+		}
+		CreateOptions.Cells = make([]string, len(resp.Names))
+		copy(CreateOptions.Cells, resp.Names)
+	}
+	return nil
+}
+
+func ParseTabletTypes(cmd *cobra.Command) error {
+ ttf := cmd.Flags().Lookup("tablet-types")
+ if ttf == nil {
+ return fmt.Errorf("no tablet-types flag found")
+ }
+ if !ttf.Changed {
+ CreateOptions.TabletTypes = tabletTypesDefault
+ } else if strings.TrimSpace(ttf.Value.String()) == "" {
+ return fmt.Errorf("invalid tablet-types value, at least one valid tablet type must be specified")
+ }
+ return nil
+}
+
+func validateOnDDL(cmd *cobra.Command) error {
+ if _, ok := binlogdatapb.OnDDLAction_value[strings.ToUpper(CreateOptions.OnDDL)]; !ok {
+ return fmt.Errorf("invalid on-ddl value: %s", CreateOptions.OnDDL)
+ }
+ return nil
+}
+
+func ParseAndValidateCreateOptions(cmd *cobra.Command) error {
+ if err := validateOnDDL(cmd); err != nil {
+ return err
+ }
+ if err := ParseCells(cmd); err != nil {
+ return err
+ }
+ if err := ParseTabletTypes(cmd); err != nil {
+ return err
+ }
+ return nil
+}
+
+func GetOutputFormat(cmd *cobra.Command) (string, error) {
+ format := strings.ToLower(strings.TrimSpace(BaseOptions.Format))
+ switch format {
+ case "text", "json":
+ return format, nil
+ default:
+ return "", fmt.Errorf("invalid output format, got %s", BaseOptions.Format)
+ }
+}
+
+func GetTabletSelectionPreference(cmd *cobra.Command) tabletmanagerdatapb.TabletSelectionPreference {
+ tsp := tabletmanagerdatapb.TabletSelectionPreference_ANY
+ if CreateOptions.TabletTypesInPreferenceOrder {
+ tsp = tabletmanagerdatapb.TabletSelectionPreference_INORDER
+ }
+ return tsp
+}
+
+func OutputStatusResponse(resp *vtctldatapb.WorkflowStatusResponse, format string) error {
+ var output []byte
+ var err error
+ if format == "json" {
+ output, err = cli.MarshalJSONPretty(resp)
+ if err != nil {
+ return err
+ }
+ } else {
+ tout := bytes.Buffer{}
+ tout.WriteString(fmt.Sprintf("The following vreplication streams exist for workflow %s.%s:\n\n",
+ BaseOptions.TargetKeyspace, BaseOptions.Workflow))
+ for _, shardstreams := range resp.ShardStreams {
+ for _, shardstream := range shardstreams.Streams {
+ tablet := fmt.Sprintf("%s-%d", shardstream.Tablet.Cell, shardstream.Tablet.Uid)
+ tout.WriteString(fmt.Sprintf("id=%d on %s/%s: Status: %s. %s.\n",
+ shardstream.Id, BaseOptions.TargetKeyspace, tablet, shardstream.Status, shardstream.Info))
+ }
+ }
+ tout.WriteString("\nTraffic State: ")
+ tout.WriteString(resp.TrafficState)
+ output = tout.Bytes()
+ }
+ fmt.Println(string(output))
+ return nil
+}
+
+func AddCommonFlags(cmd *cobra.Command) {
+ cmd.PersistentFlags().StringVar(&BaseOptions.TargetKeyspace, "target-keyspace", "", "Target keyspace for this workflow.")
+ cmd.MarkPersistentFlagRequired("target-keyspace")
+ cmd.PersistentFlags().StringVarP(&BaseOptions.Workflow, "workflow", "w", "", "The workflow you want to perform the command on.")
+ cmd.MarkPersistentFlagRequired("workflow")
+ cmd.PersistentFlags().StringVar(&BaseOptions.Format, "format", "text", "The format of the output; supported formats are: text,json.")
+}
+
+func AddCommonCreateFlags(cmd *cobra.Command) {
+ cmd.Flags().StringSliceVarP(&CreateOptions.Cells, "cells", "c", nil, "Cells and/or CellAliases to copy table data from.")
+ cmd.Flags().BoolVarP(&CreateOptions.AllCells, "all-cells", "a", false, "Copy table data from any existing cell.")
+ cmd.Flags().Var((*topoproto.TabletTypeListFlag)(&CreateOptions.TabletTypes), "tablet-types", "Source tablet types to replicate table data from (e.g. PRIMARY,REPLICA,RDONLY).")
+ cmd.Flags().BoolVar(&CreateOptions.TabletTypesInPreferenceOrder, "tablet-types-in-preference-order", true, "When performing source tablet selection, look for candidates in the type order as they are listed in the tablet-types flag.")
+ cmd.Flags().StringVar(&CreateOptions.OnDDL, "on-ddl", onDDLDefault, "What to do when DDL is encountered in the VReplication stream. Possible values are IGNORE, STOP, EXEC, and EXEC_IGNORE.")
+ cmd.Flags().BoolVar(&CreateOptions.DeferSecondaryKeys, "defer-secondary-keys", false, "Defer secondary index creation for a table until after it has been copied.")
+ cmd.Flags().BoolVar(&CreateOptions.AutoStart, "auto-start", true, "Start the workflow after creating it.")
+ cmd.Flags().BoolVar(&CreateOptions.StopAfterCopy, "stop-after-copy", false, "Stop the workflow after it's finished copying the existing rows and before it starts replicating changes.")
+}
+
+var SwitchTrafficOptions = struct {
+ Cells []string
+ TabletTypes []topodatapb.TabletType
+ Timeout time.Duration
+ MaxReplicationLagAllowed time.Duration
+ EnableReverseReplication bool
+ DryRun bool
+ Direction workflow.TrafficSwitchDirection
+ InitializeTargetSequences bool
+}{}
+
+func AddCommonSwitchTrafficFlags(cmd *cobra.Command, initializeTargetSequences bool) {
+ cmd.Flags().StringSliceVarP(&SwitchTrafficOptions.Cells, "cells", "c", nil, "Cells and/or CellAliases to switch traffic in.")
+ cmd.Flags().Var((*topoproto.TabletTypeListFlag)(&SwitchTrafficOptions.TabletTypes), "tablet-types", "Tablet types to switch traffic for.")
+ cmd.Flags().DurationVar(&SwitchTrafficOptions.Timeout, "timeout", TimeoutDefault, "Specifies the maximum time to wait, in seconds, for VReplication to catch up on primary tablets. The traffic switch will be cancelled on timeout.")
+ cmd.Flags().DurationVar(&SwitchTrafficOptions.MaxReplicationLagAllowed, "max-replication-lag-allowed", MaxReplicationLagDefault, "Allow traffic to be switched only if VReplication lag is below this.")
+ cmd.Flags().BoolVar(&SwitchTrafficOptions.EnableReverseReplication, "enable-reverse-replication", true, "Setup replication going back to the original source keyspace to support rolling back the traffic cutover.")
+ cmd.Flags().BoolVar(&SwitchTrafficOptions.DryRun, "dry-run", false, "Print the actions that would be taken and report any known errors that would have occurred.")
+ if initializeTargetSequences {
+ cmd.Flags().BoolVar(&SwitchTrafficOptions.InitializeTargetSequences, "initialize-target-sequences", false, "When moving tables from an unsharded keyspace to a sharded keyspace, initialize any sequences that are being used on the target when switching writes.")
+ }
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/common/utils_test.go b/go/cmd/vtctldclient/command/vreplication/common/utils_test.go
new file mode 100644
index 00000000000..0dc179060d6
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/common/utils_test.go
@@ -0,0 +1,153 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package common_test
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/spf13/cobra"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/command"
+ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/memorytopo"
+ "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver"
+ "vitess.io/vitess/go/vt/vtctl/localvtctldclient"
+ "vitess.io/vitess/go/vt/vtctl/vtctldclient"
+ "vitess.io/vitess/go/vt/vttablet/tmclient"
+)
+
+func TestParseAndValidateCreateOptions(t *testing.T) {
+ common.SetCommandCtx(context.Background())
+ ctx, cancel := context.WithTimeout(common.GetCommandCtx(), 60*time.Second)
+ defer cancel()
+ cells := []string{"zone1", "zone2", "zone3"}
+ SetupLocalVtctldClient(t, ctx, cells...)
+
+ tests := []struct {
+ name string
+ setFunc func(*cobra.Command) error
+ wantErr bool
+ checkFunc func()
+ }{
+ {
+ name: "invalid tablet type",
+ setFunc: func(cmd *cobra.Command) error {
+ tabletTypesFlag := cmd.Flags().Lookup("tablet-types")
+ err := tabletTypesFlag.Value.Set("invalid")
+ tabletTypesFlag.Changed = true
+ return err
+ },
+ wantErr: true,
+ },
+ {
+ name: "no tablet types",
+ setFunc: func(cmd *cobra.Command) error {
+ tabletTypesFlag := cmd.Flags().Lookup("tablet-types")
+ err := tabletTypesFlag.Value.Set("")
+ tabletTypesFlag.Changed = true
+ return err
+ },
+ wantErr: true,
+ },
+ {
+ name: "valid tablet types",
+ setFunc: func(cmd *cobra.Command) error {
+ tabletTypesFlag := cmd.Flags().Lookup("tablet-types")
+ err := tabletTypesFlag.Value.Set("rdonly,replica")
+ tabletTypesFlag.Changed = true
+ return err
+ },
+ wantErr: false,
+ },
+ {
+ name: "cells and all-cells",
+ setFunc: func(cmd *cobra.Command) error {
+ cellsFlag := cmd.Flags().Lookup("cells")
+ allCellsFlag := cmd.Flags().Lookup("all-cells")
+ if err := cellsFlag.Value.Set("cella"); err != nil {
+ return err
+ }
+ cellsFlag.Changed = true
+ if err := allCellsFlag.Value.Set("true"); err != nil {
+ return err
+ }
+ allCellsFlag.Changed = true
+ return nil
+ },
+ wantErr: true,
+ },
+ {
+ name: "all cells",
+ setFunc: func(cmd *cobra.Command) error {
+ allCellsFlag := cmd.Flags().Lookup("all-cells")
+ if err := allCellsFlag.Value.Set("true"); err != nil {
+ return err
+ }
+ allCellsFlag.Changed = true
+ return nil
+ },
+ wantErr: false,
+ checkFunc: func() {
+ require.Equal(t, cells, common.CreateOptions.Cells)
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ cmd := &cobra.Command{}
+ common.AddCommonCreateFlags(cmd)
+ test := func() error {
+ if tt.setFunc != nil {
+ if err := tt.setFunc(cmd); err != nil {
+ return err
+ }
+ }
+ if err := common.ParseAndValidateCreateOptions(cmd); err != nil {
+ return err
+ }
+ return nil
+ }
+ if err := test(); (err != nil) != tt.wantErr {
+ t.Errorf("ParseAndValidateCreateOptions() error = %v, wantErr %t", err, tt.wantErr)
+ }
+ if tt.checkFunc != nil {
+ tt.checkFunc()
+ }
+ })
+ }
+}
+
+// SetupLocalVtctldClient sets up a local or internal VtctldServer and
+// VtctldClient for tests. It uses a memorytopo instance which contains
+// the cells provided.
+func SetupLocalVtctldClient(t *testing.T, ctx context.Context, cells ...string) {
+ ts, factory := memorytopo.NewServerAndFactory(ctx, cells...)
+ topo.RegisterFactory("test", factory)
+ tmclient.RegisterTabletManagerClientFactory("grpc", func() tmclient.TabletManagerClient {
+ return nil
+ })
+ vtctld := grpcvtctldserver.NewVtctldServer(ts)
+ localvtctldclient.SetServer(vtctld)
+ command.VtctldClientProtocol = "local"
+ client, err := vtctldclient.New(command.VtctldClientProtocol, "")
+ require.NoError(t, err, "failed to create local vtctld client which uses an internal vtctld server")
+ common.SetClient(client)
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go b/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go
new file mode 100644
index 00000000000..b703e873bd0
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/lookupvindex/lookupvindex.go
@@ -0,0 +1,321 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package lookupvindex
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vschemapb "vitess.io/vitess/go/vt/proto/vschema"
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+ topoprotopb "vitess.io/vitess/go/vt/topo/topoproto"
+)
+
+var (
+ tabletTypesDefault = []topodatapb.TabletType{
+ topodatapb.TabletType_REPLICA,
+ topodatapb.TabletType_PRIMARY,
+ }
+
+ baseOptions = struct {
+		// This is where the lookup table and VReplication workflow
+ // will be created.
+ TableKeyspace string
+ // This will be the name of the Lookup Vindex and the name
+ // of the VReplication workflow.
+ Name string
+ Vschema *vschemapb.Keyspace
+ }{}
+
+ // base is the base command for all actions related to Lookup Vindexes.
+ base = &cobra.Command{
+ Use: "LookupVindex --name --table-keyspace [command] [command-flags]",
+ Short: "Perform commands related to creating, backfilling, and externalizing Lookup Vindexes using VReplication workflows.",
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"lookupvindex"},
+ Args: cobra.NoArgs,
+ }
+
+ createOptions = struct {
+ Keyspace string
+ Type string
+ TableOwner string
+ TableOwnerColumns []string
+ TableName string
+ TableVindexType string
+ Cells []string
+ TabletTypes []topodatapb.TabletType
+ TabletTypesInPreferenceOrder bool
+ IgnoreNulls bool
+ ContinueAfterCopyWithOwner bool
+ }{}
+
+ externalizeOptions = struct {
+ Keyspace string
+ }{}
+
+	parseAndValidateCreate = func(cmd *cobra.Command, args []string) error {
+		if createOptions.TableName == "" { // Use vindex name
+			createOptions.TableName = baseOptions.Name
+		}
+		if !strings.Contains(createOptions.Type, "lookup") {
+			return fmt.Errorf("vindex type must be a lookup vindex")
+		}
+		baseOptions.Vschema = &vschemapb.Keyspace{
+			Vindexes: map[string]*vschemapb.Vindex{
+				baseOptions.Name: {
+					Type: createOptions.Type,
+					Params: map[string]string{
+						"table":        baseOptions.TableKeyspace + "." + createOptions.TableName,
+						"from":         strings.Join(createOptions.TableOwnerColumns, ","),
+						"to":           "keyspace_id",
+						"ignore_nulls": fmt.Sprintf("%t", createOptions.IgnoreNulls),
+					},
+					Owner: createOptions.TableOwner,
+				},
+			},
+			Tables: map[string]*vschemapb.Table{
+				createOptions.TableOwner: {
+					ColumnVindexes: []*vschemapb.ColumnVindex{
+						{
+							Name:    baseOptions.Name,
+							Columns: createOptions.TableOwnerColumns,
+						},
+					},
+				},
+				createOptions.TableName: {
+					ColumnVindexes: []*vschemapb.ColumnVindex{
+						{
+							// If the vindex name/type is empty then we'll fill this in
+							// later using the default for the column types.
+							Name:    createOptions.TableVindexType,
+							Columns: createOptions.TableOwnerColumns,
+						},
+					},
+				},
+			},
+		}
+
+		// VReplication specific flags: fall back to the default tablet types
+		ttFlag := cmd.Flags().Lookup("tablet-types")
+		if ttFlag == nil || !ttFlag.Changed {
+			createOptions.TabletTypes = tabletTypesDefault
+		}
+		cFlag := cmd.Flags().Lookup("cells")
+		if cFlag != nil && cFlag.Changed {
+			for i, cell := range createOptions.Cells {
+				createOptions.Cells[i] = strings.TrimSpace(cell)
+			}
+		}
+		return nil
+	}
+
+ // cancel makes a WorkflowDelete call to a vtctld.
+ cancel = &cobra.Command{
+ Use: "cancel",
+ Short: "Cancel the VReplication workflow that backfills the Lookup Vindex.",
+ Example: `vtctldclient --server localhost:15999 LookupVindex --name corder_lookup_vdx --table-keyspace customer cancel`,
+ SilenceUsage: true,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Cancel"},
+ Args: cobra.NoArgs,
+ RunE: commandCancel,
+ }
+
+ // create makes a LookupVindexCreate call to a vtctld.
+ create = &cobra.Command{
+ Use: "create",
+ Short: "Create the Lookup Vindex in the specified keyspace and backfill it with a VReplication workflow.",
+ Example: `vtctldclient --server localhost:15999 LookupVindex --name corder_lookup_vdx --table-keyspace customer create --keyspace customer --type consistent_lookup_unique --table-owner corder --table-owner-columns sku --table-name corder_lookup_tbl --table-vindex-type unicode_loose_xxhash`,
+ SilenceUsage: true,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Create"},
+ Args: cobra.NoArgs,
+ PreRunE: parseAndValidateCreate,
+ RunE: commandCreate,
+ }
+
+ // externalize makes a LookupVindexExternalize call to a vtctld.
+ externalize = &cobra.Command{
+ Use: "externalize",
+ Short: "Externalize the Lookup Vindex. If the Vindex has an owner the VReplication workflow will also be deleted.",
+ Example: `vtctldclient --server localhost:15999 LookupVindex --name corder_lookup_vdx --table-keyspace customer externalize`,
+ SilenceUsage: true,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Externalize"},
+ Args: cobra.NoArgs,
+ RunE: commandExternalize,
+ }
+
+ // show makes a GetWorkflows call to a vtctld.
+ show = &cobra.Command{
+ Use: "show",
+ Short: "Show the status of the VReplication workflow that backfills the Lookup Vindex.",
+ Example: `vtctldclient --server localhost:15999 LookupVindex --name corder_lookup_vdx --table-keyspace customer show`,
+ SilenceUsage: true,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Show"},
+ Args: cobra.NoArgs,
+ RunE: commandShow,
+ }
+)
+
+func commandCancel(cmd *cobra.Command, args []string) error {
+ cli.FinishedParsing(cmd)
+
+ req := &vtctldatapb.WorkflowDeleteRequest{
+ Keyspace: baseOptions.TableKeyspace,
+ Workflow: baseOptions.Name,
+ }
+ _, err := common.GetClient().WorkflowDelete(common.GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+
+	output := fmt.Sprintf("LookupVindex %s left in place and the %s VReplication workflow has been deleted",
+ baseOptions.Name, baseOptions.Name)
+ fmt.Println(output)
+
+ return nil
+}
+
+func commandCreate(cmd *cobra.Command, args []string) error {
+ tsp := common.GetTabletSelectionPreference(cmd)
+ cli.FinishedParsing(cmd)
+
+ _, err := common.GetClient().LookupVindexCreate(common.GetCommandCtx(), &vtctldatapb.LookupVindexCreateRequest{
+ Workflow: baseOptions.Name,
+ Keyspace: createOptions.Keyspace,
+ Vindex: baseOptions.Vschema,
+ ContinueAfterCopyWithOwner: createOptions.ContinueAfterCopyWithOwner,
+ Cells: createOptions.Cells,
+ TabletTypes: createOptions.TabletTypes,
+ TabletSelectionPreference: tsp,
+ })
+
+ if err != nil {
+ return err
+ }
+
+	output := fmt.Sprintf("LookupVindex %s created in the %s keyspace and the %s VReplication workflow scheduled on the %s shards, use show to view progress",
+ baseOptions.Name, createOptions.Keyspace, baseOptions.Name, baseOptions.TableKeyspace)
+ fmt.Println(output)
+
+ return nil
+}
+
+func commandExternalize(cmd *cobra.Command, args []string) error {
+ if externalizeOptions.Keyspace == "" {
+ externalizeOptions.Keyspace = baseOptions.TableKeyspace
+ }
+ cli.FinishedParsing(cmd)
+
+ resp, err := common.GetClient().LookupVindexExternalize(common.GetCommandCtx(), &vtctldatapb.LookupVindexExternalizeRequest{
+ Keyspace: externalizeOptions.Keyspace,
+ // The name of the workflow and lookup vindex.
+ Name: baseOptions.Name,
+ // Where the lookup table and VReplication workflow were created.
+ TableKeyspace: baseOptions.TableKeyspace,
+ })
+
+ if err != nil {
+ return err
+ }
+
+ output := fmt.Sprintf("LookupVindex %s has been externalized", baseOptions.Name)
+ if resp.WorkflowDeleted {
+ output = output + fmt.Sprintf(" and the %s VReplication workflow has been deleted", baseOptions.Name)
+ }
+ fmt.Println(output)
+
+ return nil
+}
+
+func commandShow(cmd *cobra.Command, args []string) error {
+ cli.FinishedParsing(cmd)
+
+ req := &vtctldatapb.GetWorkflowsRequest{
+ Keyspace: baseOptions.TableKeyspace,
+ Workflow: baseOptions.Name,
+ }
+ resp, err := common.GetClient().GetWorkflows(common.GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+
+ data, err := cli.MarshalJSONPretty(resp)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+
+ return nil
+}
+
+func registerCommands(root *cobra.Command) {
+ base.PersistentFlags().StringVar(&baseOptions.Name, "name", "", "The name of the Lookup Vindex to create. This will also be the name of the VReplication workflow created to backfill the Lookup Vindex.")
+ base.MarkPersistentFlagRequired("name")
+ base.PersistentFlags().StringVar(&baseOptions.TableKeyspace, "table-keyspace", "", "The keyspace to create the lookup table in. This is also where the VReplication workflow is created to backfill the Lookup Vindex.")
+ base.MarkPersistentFlagRequired("table-keyspace")
+ root.AddCommand(base)
+
+ // This will create the lookup vindex in the specified keyspace
+ // and setup a VReplication workflow to backfill its lookup table.
+ create.Flags().StringVar(&createOptions.Keyspace, "keyspace", "", "The keyspace to create the Lookup Vindex in. This is also where the table-owner must exist.")
+ create.MarkFlagRequired("keyspace")
+ create.Flags().StringVar(&createOptions.Type, "type", "", "The type of Lookup Vindex to create.")
+ create.MarkFlagRequired("type")
+ create.Flags().StringVar(&createOptions.TableOwner, "table-owner", "", "The table holding the data which we should use to backfill the Lookup Vindex. This must exist in the same keyspace as the Lookup Vindex.")
+ create.MarkFlagRequired("table-owner")
+ create.Flags().StringSliceVar(&createOptions.TableOwnerColumns, "table-owner-columns", nil, "The columns to read from the owner table. These will be used to build the hash which gets stored as the keyspace_id value in the lookup table.")
+ create.MarkFlagRequired("table-owner-columns")
+ create.Flags().StringVar(&createOptions.TableName, "table-name", "", "The name of the lookup table. If not specified, then it will be created using the same name as the Lookup Vindex.")
+ create.Flags().StringVar(&createOptions.TableVindexType, "table-vindex-type", "", "The primary vindex name/type to use for the lookup table, if the table-keyspace is sharded. This must match the name of a vindex defined in the table-keyspace. If no value is provided then the default type will be used based on the table-owner-columns types.")
+ create.Flags().BoolVar(&createOptions.IgnoreNulls, "ignore-nulls", false, "Do not add corresponding records in the lookup table if any of the owner table's 'from' fields are NULL.")
+ create.Flags().BoolVar(&createOptions.ContinueAfterCopyWithOwner, "continue-after-copy-with-owner", true, "Vindex will continue materialization after the backfill completes when an owner is provided.")
+ // VReplication specific flags.
+ create.Flags().StringSliceVar(&createOptions.Cells, "cells", nil, "Cells to look in for source tablets to replicate from.")
+ create.Flags().Var((*topoprotopb.TabletTypeListFlag)(&createOptions.TabletTypes), "tablet-types", "Source tablet types to replicate from.")
+ create.Flags().BoolVar(&createOptions.TabletTypesInPreferenceOrder, "tablet-types-in-preference-order", true, "When performing source tablet selection, look for candidates in the type order as they are listed in the tablet-types flag.")
+ base.AddCommand(create)
+
+ // This will show the output of GetWorkflows client call
+ // for the VReplication workflow used.
+ base.AddCommand(show)
+
+ // This will also delete the VReplication workflow if the
+ // vindex has an owner as the lookup vindex will then be
+ // managed by VTGate.
+ externalize.Flags().StringVar(&externalizeOptions.Keyspace, "keyspace", "", "The keyspace containing the Lookup Vindex. If no value is specified then the table-keyspace will be used.")
+ base.AddCommand(externalize)
+
+ // The cancel command deletes the VReplication workflow used
+ // to backfill the lookup vindex. It ends up making a
+ // WorkflowDelete VtctldServer call.
+ base.AddCommand(cancel)
+}
+
+func init() {
+ common.RegisterCommandHandler("LookupVindex", registerCommands)
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/materialize/create.go b/go/cmd/vtctldclient/command/vreplication/materialize/create.go
new file mode 100644
index 00000000000..d835b0f3426
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/materialize/create.go
@@ -0,0 +1,189 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package materialize
+
+import (
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+ "vitess.io/vitess/go/vt/sqlparser"
+ "vitess.io/vitess/go/vt/topo/topoproto"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var (
+ createOptions = struct {
+ SourceKeyspace string
+ TableSettings tableSettings
+ }{}
+
+ // create makes a MaterializeCreate gRPC call to a vtctld.
+ create = &cobra.Command{
+ Use: "create",
+ Short: "Create and run a Materialize VReplication workflow.",
+ Example: `vtctldclient --server localhost:15999 materialize --workflow product_sales --target-keyspace commerce create --source-keyspace commerce --table-settings '[{"target_table": "sales_by_sku", "create_ddl": "create table sales_by_sku (sku varbinary(128) not null primary key, orders bigint, revenue bigint)", "source_expression": "select sku, count(*) as orders, sum(price) as revenue from corder group by sku"}]' --cells zone1 --cells zone2 --tablet-types replica`,
+ Long: `Materialize is a lower level VReplication command that allows for generalized materialization
+of tables. The target tables can be copies, aggregations, or views. The target tables are kept
+in sync in near-realtime. The primary flag used to define the materializations (you can have
+multiple per workflow) is table-settings which is a JSON array where each value must contain
+two key/value pairs. The first required key is 'target_table' and it is the name of the table
+in the target-keyspace to store the results in. The second required key is 'source_expression'
+and its value is the select query to run against the source table. An optional key/value pair
+can also be specified for 'create_ddl' which provides the DDL to create the target table if it
+does not exist -- you can alternatively specify a value of 'copy' if the target table schema
+should be copied as-is from the source keyspace. Here's an example value for table-settings:
+[
+ {
+ "target_table": "customer_one_email",
+ "source_expression": "select email from customer where customer_id = 1"
+ },
+ {
+ "target_table": "states",
+ "source_expression": "select * from states",
+ "create_ddl": "copy"
+ },
+ {
+ "target_table": "sales_by_sku",
+ "source_expression": "select sku, count(*) as orders, sum(price) as revenue from corder group by sku",
+ "create_ddl": "create table sales_by_sku (sku varbinary(128) not null primary key, orders bigint, revenue bigint)"
+ }
+]
+`,
+ SilenceUsage: true,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Create"},
+ Args: cobra.NoArgs,
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ if err := common.ParseAndValidateCreateOptions(cmd); err != nil {
+ return err
+ }
+ return nil
+ },
+ RunE: commandCreate,
+ }
+)
+
+func commandCreate(cmd *cobra.Command, args []string) error {
+ format, err := common.GetOutputFormat(cmd)
+ if err != nil {
+ return err
+ }
+ tsp := common.GetTabletSelectionPreference(cmd)
+ cli.FinishedParsing(cmd)
+
+ ms := &vtctldatapb.MaterializeSettings{
+ Workflow: common.BaseOptions.Workflow,
+ TargetKeyspace: common.BaseOptions.TargetKeyspace,
+ SourceKeyspace: createOptions.SourceKeyspace,
+ TableSettings: createOptions.TableSettings.val,
+ StopAfterCopy: common.CreateOptions.StopAfterCopy,
+ Cell: strings.Join(common.CreateOptions.Cells, ","),
+ TabletTypes: topoproto.MakeStringTypeCSV(common.CreateOptions.TabletTypes),
+ TabletSelectionPreference: tsp,
+ }
+
+ req := &vtctldatapb.MaterializeCreateRequest{
+ Settings: ms,
+ }
+
+ _, err = common.GetClient().MaterializeCreate(common.GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+
+ if format == "json" {
+ resp := struct {
+ Action string
+ Status string
+ }{
+ Action: "create",
+ Status: "success",
+ }
+ jsonText, _ := cli.MarshalJSONPretty(resp)
+ fmt.Println(string(jsonText))
+ } else {
+ fmt.Printf("Materialization workflow %s successfully created in the %s keyspace. Use show to view the status.\n",
+ common.BaseOptions.Workflow, common.BaseOptions.TargetKeyspace)
+ }
+
+ return nil
+}
+
+// tableSettings is a wrapper around a slice of TableMaterializeSettings
+// proto messages that implements the pflag.Value interface.
+type tableSettings struct {
+ val []*vtctldatapb.TableMaterializeSettings
+}
+
+func (ts *tableSettings) String() string {
+ tsj, _ := json.Marshal(ts.val)
+ return string(tsj)
+}
+
+func (ts *tableSettings) Set(v string) error {
+ ts.val = make([]*vtctldatapb.TableMaterializeSettings, 0)
+ err := json.Unmarshal([]byte(v), &ts.val)
+ if err != nil {
+ return fmt.Errorf("table-settings is not valid JSON")
+ }
+ if len(ts.val) == 0 {
+ return fmt.Errorf("empty table-settings")
+ }
+
+ // Validate the provided queries.
+ seenSourceTables := make(map[string]bool)
+ for _, tms := range ts.val {
+ if tms.TargetTable == "" || tms.SourceExpression == "" {
+ return fmt.Errorf("missing target_table or source_expression")
+ }
+ // Validate that the query is valid.
+ stmt, err := sqlparser.Parse(tms.SourceExpression)
+ if err != nil {
+ return fmt.Errorf("invalid source_expression: %q", tms.SourceExpression)
+ }
+ // Validate that each source-expression uses a different table.
+ // If any of them query the same table the materialize workflow
+ // will fail.
+ err = sqlparser.Walk(func(node sqlparser.SQLNode) (kontinue bool, err error) {
+ switch node := node.(type) {
+ case sqlparser.TableName:
+ if !node.Name.IsEmpty() {
+ if seenSourceTables[node.Name.String()] {
+ return false, fmt.Errorf("multiple source_expression queries use the same table: %q", node.Name.String())
+ }
+ seenSourceTables[node.Name.String()] = true
+ }
+ }
+ return true, nil
+ }, stmt)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (ts *tableSettings) Type() string {
+ return "JSON"
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go b/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go
new file mode 100644
index 00000000000..58be1ec4433
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package materialize
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+ "vitess.io/vitess/go/vt/topo/topoproto"
+)
+
+var (
+ // base is the base command for all actions related to Materialize.
+ base = &cobra.Command{
+ Use: "Materialize --workflow --target-keyspace [command] [command-flags]",
+ Short: "Perform commands related to materializing query results from the source keyspace into tables in the target keyspace.",
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"materialize"},
+ Args: cobra.ExactArgs(1),
+ }
+)
+
+func registerCommands(root *cobra.Command) {
+ common.AddCommonFlags(base)
+ root.AddCommand(base)
+
+ create.Flags().StringSliceVarP(&common.CreateOptions.Cells, "cells", "c", nil, "Cells and/or CellAliases to copy table data from.")
+ create.Flags().Var((*topoproto.TabletTypeListFlag)(&common.CreateOptions.TabletTypes), "tablet-types", "Source tablet types to replicate table data from (e.g. PRIMARY,REPLICA,RDONLY).")
+ create.Flags().BoolVar(&common.CreateOptions.TabletTypesInPreferenceOrder, "tablet-types-in-preference-order", true, "When performing source tablet selection, look for candidates in the type order as they are listed in the tablet-types flag.")
+ create.Flags().StringVar(&createOptions.SourceKeyspace, "source-keyspace", "", "Keyspace where the tables queried in the 'source_expression' values within table-settings live.")
+ create.MarkFlagRequired("source-keyspace")
+ create.Flags().Var(&createOptions.TableSettings, "table-settings", "A JSON array defining what tables to materialize using what select statements. See the --help output for more details.")
+ create.MarkFlagRequired("table-settings")
+ create.Flags().BoolVar(&common.CreateOptions.StopAfterCopy, "stop-after-copy", false, "Stop the workflow after it's finished copying the existing rows and before it starts replicating changes.")
+ base.AddCommand(create)
+
+ // Generic workflow commands.
+ opts := &common.SubCommandsOpts{
+ SubCommand: "Materialize",
+ Workflow: "product_sales",
+ }
+ base.AddCommand(common.GetCancelCommand(opts))
+ base.AddCommand(common.GetShowCommand(opts))
+ base.AddCommand(common.GetStartCommand(opts))
+ base.AddCommand(common.GetStopCommand(opts))
+}
+
+func init() {
+ common.RegisterCommandHandler("Materialize", registerCommands)
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/migrate/migrate.go b/go/cmd/vtctldclient/command/vreplication/migrate/migrate.go
new file mode 100644
index 00000000000..25f54ec71af
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/migrate/migrate.go
@@ -0,0 +1,134 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package migrate
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var (
+ // migrate is the base command for all actions related to the migrate command.
+ migrate = &cobra.Command{
+ Use: "Migrate --workflow --target-keyspace [command] [command-flags]",
+ Short: "Migrate is used to import data from an external cluster into the current cluster.",
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"migrate"},
+ Args: cobra.ExactArgs(1),
+ }
+)
+
+var createOptions = struct {
+ MountName string
+ SourceKeyspace string
+ AllTables bool
+ IncludeTables []string
+ ExcludeTables []string
+ SourceTimeZone string
+ NoRoutingRules bool
+}{}
+
+var createCommand = &cobra.Command{
+ Use: "create",
+ Short: "Create and optionally run a Migrate VReplication workflow.",
+ Example: `vtctldclient --server localhost:15999 migrate --workflow import --target-keyspace customer create --source-keyspace commerce --mount-name ext1 --tablet-types replica`,
+ SilenceUsage: true,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Create"},
+ Args: cobra.NoArgs,
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ // Either specific tables or the all tables flags are required.
+ if !cmd.Flags().Lookup("tables").Changed && !cmd.Flags().Lookup("all-tables").Changed {
+ return fmt.Errorf("tables or all-tables are required to specify which tables to move")
+ }
+ if err := common.ParseAndValidateCreateOptions(cmd); err != nil {
+ return err
+ }
+ return nil
+ },
+ RunE: commandCreate,
+}
+
+func commandCreate(cmd *cobra.Command, args []string) error {
+ tsp := common.GetTabletSelectionPreference(cmd)
+ cli.FinishedParsing(cmd)
+
+ req := &vtctldatapb.MigrateCreateRequest{
+ Workflow: common.BaseOptions.Workflow,
+ TargetKeyspace: common.BaseOptions.TargetKeyspace,
+ SourceKeyspace: createOptions.SourceKeyspace,
+ MountName: createOptions.MountName,
+ SourceTimeZone: createOptions.SourceTimeZone,
+ Cells: common.CreateOptions.Cells,
+ TabletTypes: common.CreateOptions.TabletTypes,
+ TabletSelectionPreference: tsp,
+ AllTables: createOptions.AllTables,
+ IncludeTables: createOptions.IncludeTables,
+ ExcludeTables: createOptions.ExcludeTables,
+ OnDdl: common.CreateOptions.OnDDL,
+ DeferSecondaryKeys: common.CreateOptions.DeferSecondaryKeys,
+ AutoStart: common.CreateOptions.AutoStart,
+ StopAfterCopy: common.CreateOptions.StopAfterCopy,
+ NoRoutingRules: createOptions.NoRoutingRules,
+ }
+
+ _, err := common.GetClient().MigrateCreate(common.GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func addCreateFlags(cmd *cobra.Command) {
+ common.AddCommonCreateFlags(cmd)
+ cmd.Flags().StringVar(&createOptions.SourceKeyspace, "source-keyspace", "", "Keyspace where the tables are being moved from.")
+ cmd.MarkFlagRequired("source-keyspace")
+ cmd.Flags().StringVar(&createOptions.MountName, "mount-name", "", "Name external cluster is mounted as.")
+ cmd.MarkFlagRequired("mount-name")
+ cmd.Flags().StringVar(&createOptions.SourceTimeZone, "source-time-zone", "", "Specifying this causes any DATETIME fields to be converted from the given time zone into UTC.")
+ cmd.Flags().BoolVar(&createOptions.AllTables, "all-tables", false, "Copy all tables from the source.")
+ cmd.Flags().StringSliceVar(&createOptions.IncludeTables, "tables", nil, "Source tables to copy.")
+ cmd.Flags().StringSliceVar(&createOptions.ExcludeTables, "exclude-tables", nil, "Source tables to exclude from copying.")
+ cmd.Flags().BoolVar(&createOptions.NoRoutingRules, "no-routing-rules", false, "(Advanced) Do not create routing rules while creating the workflow. See the reference documentation for limitations if you use this flag.")
+
+}
+
+func registerCommands(root *cobra.Command) {
+ common.AddCommonFlags(migrate)
+ root.AddCommand(migrate)
+ addCreateFlags(createCommand)
+ migrate.AddCommand(createCommand)
+ opts := &common.SubCommandsOpts{
+ SubCommand: "Migrate",
+ Workflow: "import",
+ }
+ migrate.AddCommand(common.GetCompleteCommand(opts))
+ migrate.AddCommand(common.GetCancelCommand(opts))
+ migrate.AddCommand(common.GetShowCommand(opts))
+ migrate.AddCommand(common.GetStatusCommand(opts))
+}
+
+func init() {
+ common.RegisterCommandHandler("Migrate", registerCommands)
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/mount/mount.go b/go/cmd/vtctldclient/command/vreplication/mount/mount.go
new file mode 100644
index 00000000000..95ce3961e71
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/mount/mount.go
@@ -0,0 +1,183 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mount
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var (
+ // base is the base command for all actions related to the mount action.
+ base = &cobra.Command{
+ Use: "Mount [command] [command-flags]",
+ Short: "Mount is used to link an external Vitess cluster in order to migrate data from it.",
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"mount"},
+ Args: cobra.ExactArgs(1),
+ }
+)
+
+var mountOptions struct {
+ Name string
+ TopoType string
+ TopoServer string
+ TopoRoot string
+}
+
+var register = &cobra.Command{
+ Use: "register",
+ Short: "Register an external Vitess Cluster.",
+ Example: `vtctldclient --server localhost:15999 mount register --name ext1 --topo-type etcd2 --topo-server localhost:12379 --topo-root /vitess/global`,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Register"},
+ Args: cobra.NoArgs,
+ RunE: commandRegister,
+}
+
+func commandRegister(cmd *cobra.Command, args []string) error {
+ cli.FinishedParsing(cmd)
+
+ req := &vtctldatapb.MountRegisterRequest{
+ Name: mountOptions.Name,
+ TopoType: mountOptions.TopoType,
+ TopoServer: mountOptions.TopoServer,
+ TopoRoot: mountOptions.TopoRoot,
+ }
+ _, err := common.GetClient().MountRegister(common.GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("Mount %s registered successfully\n", req.Name)
+ return nil
+}
+
+var unregister = &cobra.Command{
+ Use: "unregister",
+ Short: "Unregister a previously mounted external Vitess Cluster.",
+ Example: `vtctldclient --server localhost:15999 mount unregister --name ext1`,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Unregister"},
+ Args: cobra.NoArgs,
+ RunE: commandUnregister,
+}
+
+func commandUnregister(cmd *cobra.Command, args []string) error {
+ cli.FinishedParsing(cmd)
+
+ req := &vtctldatapb.MountUnregisterRequest{
+ Name: mountOptions.Name,
+ }
+ _, err := common.GetClient().MountUnregister(common.GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("Mount %s unregistered successfully\n", req.Name)
+ return nil
+}
+
+var show = &cobra.Command{
+ Use: "show",
+ Short: "Show attributes of a previously mounted external Vitess Cluster.",
+ Example: `vtctldclient --server localhost:15999 mount show --name ext1`,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Show"},
+ Args: cobra.NoArgs,
+ RunE: commandShow,
+}
+
+func commandShow(cmd *cobra.Command, args []string) error {
+ cli.FinishedParsing(cmd)
+
+ req := &vtctldatapb.MountShowRequest{
+ Name: mountOptions.Name,
+ }
+ resp, err := common.GetClient().MountShow(common.GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+ data, err := json.Marshal(resp)
+ if err != nil {
+ return err
+ }
+ fmt.Printf("%s\n", string(data))
+ return nil
+}
+
+var list = &cobra.Command{
+ Use: "list",
+ Short: "List all mounted external Vitess Clusters.",
+ Example: `vtctldclient --server localhost:15999 mount list`,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"List"},
+ Args: cobra.NoArgs,
+ RunE: commandList,
+}
+
+// commandList makes a MountList gRPC call to a vtctld and prints the
+// full response as JSON so callers can consume it programmatically.
+func commandList(cmd *cobra.Command, args []string) error {
+	cli.FinishedParsing(cmd)
+
+	req := &vtctldatapb.MountListRequest{}
+	resp, err := common.GetClient().MountList(common.GetCommandCtx(), req)
+	if err != nil {
+		return err
+	}
+	// Marshal the response rather than formatting selected fields.
+	data, err := json.Marshal(resp)
+	if err != nil {
+		return err
+	}
+	fmt.Printf("%s\n", string(data))
+	return nil
+}
+
+func registerCommands(root *cobra.Command) {
+ root.AddCommand(base)
+
+ register.Flags().StringVar(&mountOptions.Name, "name", "", "Name to use for the mount.")
+ register.MarkFlagRequired("name")
+ register.Flags().StringVar(&mountOptions.TopoType, "topo-type", "", "Topo server implementation to use.")
+ register.MarkFlagRequired("topo-type")
+ register.Flags().StringVar(&mountOptions.TopoServer, "topo-server", "", "Topo server address.")
+ register.MarkFlagRequired("topo-server")
+ register.Flags().StringVar(&mountOptions.TopoRoot, "topo-root", "", "Topo server root path.")
+ register.MarkFlagRequired("topo-root")
+ base.AddCommand(register)
+
+ unregister.Flags().StringVar(&mountOptions.Name, "name", "", "Name of the mount.")
+ unregister.MarkFlagRequired("name")
+ base.AddCommand(unregister)
+
+ show.Flags().StringVar(&mountOptions.Name, "name", "", "Name of the mount.")
+ show.MarkFlagRequired("name")
+ base.AddCommand(show)
+
+ base.AddCommand(list)
+}
+
+func init() {
+ common.RegisterCommandHandler("Mount", registerCommands)
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/movetables/create.go b/go/cmd/vtctldclient/command/vreplication/movetables/create.go
new file mode 100644
index 00000000000..95c50f4f97e
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/movetables/create.go
@@ -0,0 +1,122 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package movetables
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var (
+ createOptions = struct {
+ SourceKeyspace string
+ SourceShards []string
+ ExternalClusterName string
+ AllTables bool
+ IncludeTables []string
+ ExcludeTables []string
+ SourceTimeZone string
+ NoRoutingRules bool
+ AtomicCopy bool
+ }{}
+
+ // create makes a MoveTablesCreate gRPC call to a vtctld.
+ create = &cobra.Command{
+ Use: "create",
+ Short: "Create and optionally run a MoveTables VReplication workflow.",
+ Example: `vtctldclient --server localhost:15999 movetables --workflow commerce2customer --target-keyspace customer create --source-keyspace commerce --cells zone1 --cells zone2 --tablet-types replica`,
+ SilenceUsage: true,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Create"},
+ Args: cobra.NoArgs,
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ // Either specific tables or the all tables flags are required.
+ if !cmd.Flags().Lookup("tables").Changed && !cmd.Flags().Lookup("all-tables").Changed {
+ return fmt.Errorf("tables or all-tables are required to specify which tables to move")
+ }
+ if err := common.ParseAndValidateCreateOptions(cmd); err != nil {
+ return err
+ }
+ checkAtomicCopyOptions := func() error {
+ var errors []string
+ if !createOptions.AtomicCopy {
+ return nil
+ }
+ if !createOptions.AllTables {
+ errors = append(errors, "atomic copy requires --all-tables")
+ }
+ if len(createOptions.IncludeTables) > 0 || len(createOptions.ExcludeTables) > 0 {
+ errors = append(errors, "atomic copy does not support specifying tables")
+ }
+ if len(errors) > 0 {
+ return fmt.Errorf("found options incompatible with atomic copy: %s", strings.Join(errors, ", "))
+ }
+ return nil
+ }
+ if err := checkAtomicCopyOptions(); err != nil {
+ return err
+ }
+ return nil
+ },
+ RunE: commandCreate,
+ }
+)
+
+func commandCreate(cmd *cobra.Command, args []string) error {
+ format, err := common.GetOutputFormat(cmd)
+ if err != nil {
+ return err
+ }
+ tsp := common.GetTabletSelectionPreference(cmd)
+ cli.FinishedParsing(cmd)
+
+ req := &vtctldatapb.MoveTablesCreateRequest{
+ Workflow: common.BaseOptions.Workflow,
+ TargetKeyspace: common.BaseOptions.TargetKeyspace,
+ SourceKeyspace: createOptions.SourceKeyspace,
+ SourceShards: createOptions.SourceShards,
+ SourceTimeZone: createOptions.SourceTimeZone,
+ Cells: common.CreateOptions.Cells,
+ TabletTypes: common.CreateOptions.TabletTypes,
+ TabletSelectionPreference: tsp,
+ AllTables: createOptions.AllTables,
+ IncludeTables: createOptions.IncludeTables,
+ ExcludeTables: createOptions.ExcludeTables,
+ OnDdl: common.CreateOptions.OnDDL,
+ DeferSecondaryKeys: common.CreateOptions.DeferSecondaryKeys,
+ AutoStart: common.CreateOptions.AutoStart,
+ StopAfterCopy: common.CreateOptions.StopAfterCopy,
+ NoRoutingRules: createOptions.NoRoutingRules,
+ AtomicCopy: createOptions.AtomicCopy,
+ }
+
+ resp, err := common.GetClient().MoveTablesCreate(common.GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+ if err = common.OutputStatusResponse(resp, format); err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/movetables/movetables.go b/go/cmd/vtctldclient/command/vreplication/movetables/movetables.go
new file mode 100644
index 00000000000..e2c7daed223
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/movetables/movetables.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package movetables
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+)
+
+var (
+ // base is the base command for all actions related to MoveTables.
+ base = &cobra.Command{
+ Use: "MoveTables --workflow --target-keyspace [command] [command-flags]",
+ Short: "Perform commands related to moving tables from a source keyspace to a target keyspace.",
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"movetables"},
+ Args: cobra.ExactArgs(1),
+ }
+)
+
+func registerCommands(root *cobra.Command) {
+ common.AddCommonFlags(base)
+ root.AddCommand(base)
+
+ common.AddCommonCreateFlags(create)
+ create.PersistentFlags().StringVar(&createOptions.SourceKeyspace, "source-keyspace", "", "Keyspace where the tables are being moved from.")
+ create.MarkPersistentFlagRequired("source-keyspace")
+ create.Flags().StringSliceVar(&createOptions.SourceShards, "source-shards", nil, "Source shards to copy data from when performing a partial MoveTables (experimental).")
+ create.Flags().StringVar(&createOptions.SourceTimeZone, "source-time-zone", "", "Specifying this causes any DATETIME fields to be converted from the given time zone into UTC.")
+ create.Flags().BoolVar(&createOptions.AllTables, "all-tables", false, "Copy all tables from the source.")
+ create.Flags().StringSliceVar(&createOptions.IncludeTables, "tables", nil, "Source tables to copy.")
+ create.Flags().StringSliceVar(&createOptions.ExcludeTables, "exclude-tables", nil, "Source tables to exclude from copying.")
+ create.Flags().BoolVar(&createOptions.NoRoutingRules, "no-routing-rules", false, "(Advanced) Do not create routing rules while creating the workflow. See the reference documentation for limitations if you use this flag.")
+ create.Flags().BoolVar(&createOptions.AtomicCopy, "atomic-copy", false, "(EXPERIMENTAL) A single copy phase is run for all tables from the source. Use this, for example, if your source keyspace has tables which use foreign key constraints.")
+ base.AddCommand(create)
+
+ opts := &common.SubCommandsOpts{
+ SubCommand: "MoveTables",
+ Workflow: "commerce2customer",
+ }
+ base.AddCommand(common.GetShowCommand(opts))
+ base.AddCommand(common.GetStatusCommand(opts))
+
+ base.AddCommand(common.GetStartCommand(opts))
+ base.AddCommand(common.GetStopCommand(opts))
+
+ switchTrafficCommand := common.GetSwitchTrafficCommand(opts)
+ common.AddCommonSwitchTrafficFlags(switchTrafficCommand, true)
+ base.AddCommand(switchTrafficCommand)
+
+ reverseTrafficCommand := common.GetReverseTrafficCommand(opts)
+ common.AddCommonSwitchTrafficFlags(reverseTrafficCommand, false)
+ base.AddCommand(reverseTrafficCommand)
+
+ complete := common.GetCompleteCommand(opts)
+ complete.Flags().BoolVar(&common.CompleteOptions.KeepData, "keep-data", false, "Keep the original source table data that was copied by the MoveTables workflow.")
+ complete.Flags().BoolVar(&common.CompleteOptions.KeepRoutingRules, "keep-routing-rules", false, "Keep the routing rules in place that direct table traffic from the source keyspace to the target keyspace of the MoveTables workflow.")
+ complete.Flags().BoolVar(&common.CompleteOptions.RenameTables, "rename-tables", false, "Keep the original source table data that was copied by the MoveTables workflow, but rename each table to '__old'.")
+ complete.Flags().BoolVar(&common.CompleteOptions.DryRun, "dry-run", false, "Print the actions that would be taken and report any known errors that would have occurred.")
+ base.AddCommand(complete)
+
+ cancel := common.GetCancelCommand(opts)
+ cancel.Flags().BoolVar(&common.CancelOptions.KeepData, "keep-data", false, "Keep the partially copied table data from the MoveTables workflow in the target keyspace.")
+ cancel.Flags().BoolVar(&common.CancelOptions.KeepRoutingRules, "keep-routing-rules", false, "Keep the routing rules created for the MoveTables workflow.")
+ base.AddCommand(cancel)
+}
+
+func init() {
+ common.RegisterCommandHandler("MoveTables", registerCommands)
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/reshard/create.go b/go/cmd/vtctldclient/command/vreplication/reshard/create.go
new file mode 100644
index 00000000000..b8506ae61d0
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/reshard/create.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package reshard
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var (
+ reshardCreateOptions = struct {
+ sourceShards []string
+ targetShards []string
+ skipSchemaCopy bool
+ }{}
+
+ // reshardCreate makes a ReshardCreate gRPC call to a vtctld.
+ reshardCreate = &cobra.Command{
+ Use: "create",
+ Short: "Create and optionally run a Reshard VReplication workflow.",
+ Example: `vtctldclient --server localhost:15999 reshard --workflow customer2customer --target-keyspace customer create --source-shards="0" --target-shards="-80,80-" --cells zone1 --cells zone2 --tablet-types replica`,
+ SilenceUsage: true,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Create"},
+ Args: cobra.NoArgs,
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ if err := common.ParseAndValidateCreateOptions(cmd); err != nil {
+ return err
+ }
+ return nil
+ },
+ RunE: commandReshardCreate,
+ }
+)
+
+func commandReshardCreate(cmd *cobra.Command, args []string) error {
+ format, err := common.GetOutputFormat(cmd)
+ if err != nil {
+ return err
+ }
+ tsp := common.GetTabletSelectionPreference(cmd)
+ cli.FinishedParsing(cmd)
+
+ req := &vtctldatapb.ReshardCreateRequest{
+ Workflow: common.BaseOptions.Workflow,
+ Keyspace: common.BaseOptions.TargetKeyspace,
+
+ TabletTypes: common.CreateOptions.TabletTypes,
+ TabletSelectionPreference: tsp,
+ Cells: common.CreateOptions.Cells,
+ OnDdl: common.CreateOptions.OnDDL,
+ DeferSecondaryKeys: common.CreateOptions.DeferSecondaryKeys,
+ AutoStart: common.CreateOptions.AutoStart,
+ StopAfterCopy: common.CreateOptions.StopAfterCopy,
+
+ SourceShards: reshardCreateOptions.sourceShards,
+ TargetShards: reshardCreateOptions.targetShards,
+ SkipSchemaCopy: reshardCreateOptions.skipSchemaCopy,
+ }
+ resp, err := common.GetClient().ReshardCreate(common.GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+ if err = common.OutputStatusResponse(resp, format); err != nil {
+ return err
+ }
+ return nil
+}
+
+func registerCreateCommand(root *cobra.Command) {
+ common.AddCommonCreateFlags(reshardCreate)
+ reshardCreate.Flags().StringSliceVar(&reshardCreateOptions.sourceShards, "source-shards", nil, "Source shards.")
+ reshardCreate.Flags().StringSliceVar(&reshardCreateOptions.targetShards, "target-shards", nil, "Target shards.")
+ reshardCreate.Flags().BoolVar(&reshardCreateOptions.skipSchemaCopy, "skip-schema-copy", false, "Skip copying the schema from the source shards to the target shards.")
+ root.AddCommand(reshardCreate)
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/reshard/reshard.go b/go/cmd/vtctldclient/command/vreplication/reshard/reshard.go
new file mode 100644
index 00000000000..4b266dbb370
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/reshard/reshard.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package reshard
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+)
+
+var (
+ // reshard is the base command for all actions related to reshard.
+ reshard = &cobra.Command{
+ Use: "Reshard --workflow --target-keyspace [command] [command-flags]",
+ Short: "Perform commands related to resharding a keyspace.",
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"reshard"},
+ Args: cobra.ExactArgs(1),
+ }
+)
+
+func registerReshardCommands(root *cobra.Command) {
+ common.AddCommonFlags(reshard)
+ root.AddCommand(reshard)
+
+ registerCreateCommand(reshard)
+ opts := &common.SubCommandsOpts{
+ SubCommand: "Reshard",
+ Workflow: "cust2cust",
+ }
+ reshard.AddCommand(common.GetShowCommand(opts))
+ reshard.AddCommand(common.GetStatusCommand(opts))
+
+ reshard.AddCommand(common.GetStartCommand(opts))
+ reshard.AddCommand(common.GetStopCommand(opts))
+
+ switchTrafficCommand := common.GetSwitchTrafficCommand(opts)
+ common.AddCommonSwitchTrafficFlags(switchTrafficCommand, false)
+ reshard.AddCommand(switchTrafficCommand)
+
+ reverseTrafficCommand := common.GetReverseTrafficCommand(opts)
+ common.AddCommonSwitchTrafficFlags(reverseTrafficCommand, false)
+ reshard.AddCommand(reverseTrafficCommand)
+
+ reshard.AddCommand(common.GetCompleteCommand(opts))
+ reshard.AddCommand(common.GetCancelCommand(opts))
+}
+
+func init() {
+ common.RegisterCommandHandler("Reshard", registerReshardCommands)
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go
new file mode 100644
index 00000000000..a98cf3ad743
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff.go
@@ -0,0 +1,887 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vdiff
+
+import (
+ "encoding/json"
+ "fmt"
+ "html/template"
+ "io"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+ "time"
+
+ "github.com/bndr/gotabulate"
+ "github.com/google/uuid"
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+ "vitess.io/vitess/go/protoutil"
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+ vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ topoprotopb "vitess.io/vitess/go/vt/topo/topoproto"
+)
+
+var (
+ tabletTypesDefault = []topodatapb.TabletType{
+ topodatapb.TabletType_RDONLY,
+ topodatapb.TabletType_REPLICA,
+ topodatapb.TabletType_PRIMARY,
+ }
+
+ createOptions = struct {
+ UUID uuid.UUID
+ SourceCells []string
+ TargetCells []string
+ TabletTypes []topodatapb.TabletType
+ Tables []string
+ Limit uint32 // We only accept positive values but pass on an int64
+ FilteredReplicationWaitTime time.Duration
+ DebugQuery bool
+ OnlyPKs bool
+ UpdateTableStats bool
+ MaxExtraRowsToCompare uint32 // We only accept positive values but pass on an int64
+ Wait bool
+ WaitUpdateInterval time.Duration
+ AutoRetry bool
+ }{}
+
+ deleteOptions = struct {
+ Arg string
+ }{}
+
+ resumeOptions = struct {
+ UUID uuid.UUID
+ }{}
+
+ showOptions = struct {
+ Arg string
+ Verbose bool
+ }{}
+
+ stopOptions = struct {
+ UUID uuid.UUID
+ }{}
+
+ parseAndValidateCreate = func(cmd *cobra.Command, args []string) error {
+ var err error
+ if len(args) == 1 { // Validate UUID if provided
+ if createOptions.UUID, err = uuid.Parse(args[0]); err != nil {
+ return fmt.Errorf("invalid UUID provided: %v", err)
+ }
+ } else { // Generate a UUID
+ createOptions.UUID = uuid.New()
+ }
+ if !cmd.Flags().Lookup("tablet-types").Changed {
+ createOptions.TabletTypes = tabletTypesDefault
+ }
+ if cmd.Flags().Lookup("source-cells").Changed {
+ for i, cell := range createOptions.SourceCells {
+ createOptions.SourceCells[i] = strings.TrimSpace(cell)
+ }
+ }
+ if cmd.Flags().Lookup("target-cells").Changed {
+ for i, cell := range createOptions.TargetCells {
+ createOptions.TargetCells[i] = strings.TrimSpace(cell)
+ }
+ }
+ if cmd.Flags().Lookup("tables").Changed {
+ for i, table := range createOptions.Tables {
+ createOptions.Tables[i] = strings.TrimSpace(table)
+ }
+ }
+ return nil
+ }
+
+ // base is the base command for all actions related to VDiff.
+ base = &cobra.Command{
+ Use: "VDiff --workflow --target-keyspace [command] [command-flags]",
+ Short: "Perform commands related to diffing tables involved in a VReplication workflow between the source and target.",
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"vdiff"},
+ Args: cobra.NoArgs,
+ }
+
+ // create makes a VDiffCreate gRPC call to a vtctld.
+ create = &cobra.Command{
+ Use: "create",
+ Short: "Create and run a VDiff to compare the tables involved in a VReplication workflow between the source and target.",
+ Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace customer create
+vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace customer create b3f59678-5241-11ee-be56-0242ac120002`,
+ SilenceUsage: true,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Create"},
+ Args: cobra.MaximumNArgs(1),
+ PreRunE: parseAndValidateCreate,
+ RunE: commandCreate,
+ }
+
+ // delete makes a VDiffDelete gRPC call to a vtctld.
+ delete = &cobra.Command{
+ Use: "delete",
+ Short: "Delete VDiffs.",
+ Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace delete a037a9e2-5628-11ee-8c99-0242ac120002
+vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace delete all`,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Delete"},
+ Args: cobra.ExactArgs(1),
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ larg := strings.ToLower(args[0])
+ switch larg {
+ case "all":
+ default:
+ if _, err := uuid.Parse(args[0]); err != nil {
+ return fmt.Errorf("invalid argument provided (%s), valid arguments are 'all' or a valid UUID",
+ args[0])
+ }
+ }
+ deleteOptions.Arg = larg
+ return nil
+ },
+ RunE: commandDelete,
+ }
+
+ // resume makes a VDiffResume gRPC call to a vtctld.
+ resume = &cobra.Command{
+ Use: "resume",
+ Short: "Resume a VDiff.",
+ Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace resume a037a9e2-5628-11ee-8c99-0242ac120002`,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Resume"},
+ Args: cobra.ExactArgs(1),
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ uuid, err := uuid.Parse(args[0])
+ if err != nil {
+ return fmt.Errorf("invalid UUID provided: %v", err)
+ }
+ resumeOptions.UUID = uuid
+ return nil
+ },
+ RunE: commandResume,
+ }
+
+ // show makes a VDiffShow gRPC call to a vtctld.
+ show = &cobra.Command{
+ Use: "show",
+ Short: "Show the status of a VDiff.",
+ Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace show last
+vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace show a037a9e2-5628-11ee-8c99-0242ac120002
+vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace show all`,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Show"},
+ Args: cobra.ExactArgs(1),
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ larg := strings.ToLower(args[0])
+ switch larg {
+ case "last", "all":
+ default:
+ if _, err := uuid.Parse(args[0]); err != nil {
+ return fmt.Errorf("invalid argument provided (%s), valid arguments are 'all', 'last', or a valid UUID",
+ args[0])
+ }
+ }
+ showOptions.Arg = larg
+ return nil
+ },
+ RunE: commandShow,
+ }
+
+ // stop makes a VDiffStop gRPC call to a vtctld.
+ stop = &cobra.Command{
+ Use: "stop",
+ Short: "Stop a running VDiff.",
+ Example: `vtctldclient --server localhost:15999 vdiff --workflow commerce2customer --target-keyspace stop a037a9e2-5628-11ee-8c99-0242ac120002`,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Stop"},
+ Args: cobra.ExactArgs(1),
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ uuid, err := uuid.Parse(args[0])
+ if err != nil {
+ return fmt.Errorf("invalid UUID provided: %v", err)
+ }
+ stopOptions.UUID = uuid
+ return nil
+ },
+ RunE: commandStop,
+ }
+)
+
// simpleResponse is the JSON-serializable payload emitted by
// displaySimpleResponse when --format=json is requested.
type simpleResponse struct {
	Action vdiff.VDiffAction // The vdiff action that was performed.
	Status string            // Resulting status: "completed" or "scheduled" (resume).
}
+
+// displaySimpleResponse displays a simple standard response for the
+// resume, stop, and delete commands after the client command completes
+// without an error.
+func displaySimpleResponse(out io.Writer, format string, action vdiff.VDiffAction) {
+ status := "completed"
+ if action == vdiff.ResumeAction {
+ status = "scheduled"
+ }
+ if format == "json" {
+ resp := &simpleResponse{
+ Action: action,
+ Status: status,
+ }
+ jsonText, _ := cli.MarshalJSONPretty(resp)
+ fmt.Fprintln(out, string(jsonText))
+ } else {
+ fmt.Fprintf(out, "VDiff %s %s\n", action, status)
+ }
+}
+
+func commandCreate(cmd *cobra.Command, args []string) error {
+ format, err := common.GetOutputFormat(cmd)
+ if err != nil {
+ return err
+ }
+ tsp := common.GetTabletSelectionPreference(cmd)
+ cli.FinishedParsing(cmd)
+
+ resp, err := common.GetClient().VDiffCreate(common.GetCommandCtx(), &vtctldatapb.VDiffCreateRequest{
+ Workflow: common.BaseOptions.Workflow,
+ TargetKeyspace: common.BaseOptions.TargetKeyspace,
+ Uuid: createOptions.UUID.String(),
+ SourceCells: createOptions.SourceCells,
+ TargetCells: createOptions.TargetCells,
+ TabletTypes: createOptions.TabletTypes,
+ TabletSelectionPreference: tsp,
+ Tables: createOptions.Tables,
+ Limit: int64(createOptions.Limit),
+ FilteredReplicationWaitTime: protoutil.DurationToProto(createOptions.FilteredReplicationWaitTime),
+ DebugQuery: createOptions.DebugQuery,
+ OnlyPKs: createOptions.OnlyPKs,
+ UpdateTableStats: createOptions.UpdateTableStats,
+ MaxExtraRowsToCompare: int64(createOptions.MaxExtraRowsToCompare),
+ Wait: createOptions.Wait,
+ WaitUpdateInterval: protoutil.DurationToProto(createOptions.WaitUpdateInterval),
+ AutoRetry: createOptions.AutoRetry,
+ })
+
+ if err != nil {
+ return err
+ }
+
+ if createOptions.Wait {
+ tkr := time.NewTicker(createOptions.WaitUpdateInterval)
+ defer tkr.Stop()
+ var state vdiff.VDiffState
+ ctx := common.GetCommandCtx()
+ vtctldClient := common.GetClient()
+ uuidStr := createOptions.UUID.String()
+ for {
+ select {
+ case <-ctx.Done():
+ return vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired")
+ case <-tkr.C:
+ resp, err := vtctldClient.VDiffShow(ctx, &vtctldatapb.VDiffShowRequest{
+ Workflow: common.BaseOptions.Workflow,
+ TargetKeyspace: common.BaseOptions.TargetKeyspace,
+ Arg: uuidStr,
+ })
+ if err != nil {
+ return err
+ }
+ if state, err = displayShowSingleSummary(cmd.OutOrStdout(), format, common.BaseOptions.TargetKeyspace, common.BaseOptions.Workflow, uuidStr, resp, false); err != nil {
+ return err
+ }
+ if state == vdiff.CompletedState {
+ return nil
+ }
+ }
+ }
+ } else {
+ var data []byte
+ if format == "json" {
+ data, err = cli.MarshalJSONPretty(resp)
+ if err != nil {
+ return err
+ }
+ } else {
+ data = []byte(fmt.Sprintf("VDiff %s scheduled on target shards, use show to view progress", resp.UUID))
+ }
+ fmt.Println(string(data))
+ }
+
+ return nil
+}
+
+func commandDelete(cmd *cobra.Command, args []string) error {
+ format, err := common.GetOutputFormat(cmd)
+ if err != nil {
+ return err
+ }
+ cli.FinishedParsing(cmd)
+
+ _, err = common.GetClient().VDiffDelete(common.GetCommandCtx(), &vtctldatapb.VDiffDeleteRequest{
+ Workflow: common.BaseOptions.Workflow,
+ TargetKeyspace: common.BaseOptions.TargetKeyspace,
+ Arg: deleteOptions.Arg,
+ })
+
+ if err != nil {
+ return err
+ }
+
+ displaySimpleResponse(cmd.OutOrStdout(), format, vdiff.DeleteAction)
+
+ return nil
+}
+
+func commandResume(cmd *cobra.Command, args []string) error {
+ format, err := common.GetOutputFormat(cmd)
+ if err != nil {
+ return err
+ }
+ cli.FinishedParsing(cmd)
+
+ _, err = common.GetClient().VDiffResume(common.GetCommandCtx(), &vtctldatapb.VDiffResumeRequest{
+ Workflow: common.BaseOptions.Workflow,
+ TargetKeyspace: common.BaseOptions.TargetKeyspace,
+ Uuid: resumeOptions.UUID.String(),
+ })
+
+ if err != nil {
+ return err
+ }
+
+ displaySimpleResponse(cmd.OutOrStdout(), format, vdiff.ResumeAction)
+
+ return nil
+}
+
// tableSummary aggregates the current state of the table diff from all shards.
type tableSummary struct {
	TableName       string           // Name of the table being diffed.
	State           vdiff.VDiffState // Aggregated state across shards (see buildSingleSummary: error is sticky).
	RowsCompared    int64            // Total processed rows summed over shards.
	MatchingRows    int64
	MismatchedRows  int64
	ExtraRowsSource int64
	ExtraRowsTarget int64
	LastUpdated     string `json:"LastUpdated,omitempty"`
}
+
// summary aggregates the current state of the vdiff from all shards.
// It is both rendered via summaryTextTemplate and marshaled directly for
// --format=json output (hence the omitempty tags on optional sections).
type summary struct {
	Workflow, Keyspace string
	State              vdiff.VDiffState
	UUID               string
	RowsCompared       int64
	HasMismatch        bool
	Shards             string // Comma-joined, sorted list of target shards.
	StartedAt          string `json:"StartedAt,omitempty"`
	CompletedAt        string `json:"CompletedAt,omitempty"`
	TableSummaryMap    map[string]tableSummary         `json:"TableSummary,omitempty"`
	Reports            map[string]map[string]vdiff.DiffReport `json:"Reports,omitempty"`
	Errors             map[string]string               `json:"Errors,omitempty"` // Keyed by shard.
	Progress           *vdiff.ProgressReport           `json:"Progress,omitempty"`
}
+
+const summaryTextTemplate = `
+VDiff Summary for {{.Keyspace}}.{{.Workflow}} ({{.UUID}})
+State: {{.State}}
+{{if .Errors}}
+{{- range $shard, $error := .Errors}}
+ Error: (shard {{$shard}}) {{$error}}
+{{- end}}
+{{end}}
+RowsCompared: {{.RowsCompared}}
+HasMismatch: {{.HasMismatch}}
+StartedAt: {{.StartedAt}}
+{{if (eq .State "started")}}Progress: {{printf "%.2f" .Progress.Percentage}}%%{{if .Progress.ETA}}, ETA: {{.Progress.ETA}}{{end}}{{end}}
+{{if .CompletedAt}}CompletedAt: {{.CompletedAt}}{{end}}
+{{range $table := .TableSummaryMap}}
+Table {{$table.TableName}}:
+ State: {{$table.State}}
+ ProcessedRows: {{$table.RowsCompared}}
+ MatchingRows: {{$table.MatchingRows}}
+{{if $table.MismatchedRows}} MismatchedRows: {{$table.MismatchedRows}}{{end}}
+{{if $table.ExtraRowsSource}} ExtraRowsSource: {{$table.ExtraRowsSource}}{{end}}
+{{if $table.ExtraRowsTarget}} ExtraRowsTarget: {{$table.ExtraRowsTarget}}{{end}}
+{{end}}
+
+Use "--format=json" for more detailed output.
+`
+
// listing identifies a single vdiff stream on one shard, as surfaced by the
// "show all" output.
type listing struct {
	UUID, Workflow, Keyspace, Shard, State string
}

// String renders the listing as one human-readable, comma-separated line.
func (vdl *listing) String() string {
	parts := []string{
		"UUID: " + vdl.UUID,
		"Workflow: " + vdl.Workflow,
		"Keyspace: " + vdl.Keyspace,
		"Shard: " + vdl.Shard,
		"State: " + vdl.State,
	}
	return strings.Join(parts, ", ")
}
+
// getStructFieldNames returns the field names of the struct value s, in
// declaration order. s must be a struct (not a pointer to one) or
// reflect.Type.NumField will panic.
func getStructFieldNames(s any) []string {
	st := reflect.TypeOf(s)
	names := make([]string, 0, st.NumField())
	for i := 0; i < st.NumField(); i++ {
		names = append(names, st.Field(i).Name)
	}
	return names
}
+
+func buildListings(listings []*listing) string {
+ var values []string
+ var lines [][]string
+ var result string
+
+ if len(listings) == 0 {
+ return ""
+ }
+ // Get the column headers.
+ fields := getStructFieldNames(listing{})
+ // The header is the first row.
+ lines = append(lines, fields)
+ for _, listing := range listings {
+ v := reflect.ValueOf(*listing)
+ for _, field := range fields {
+ values = append(values, v.FieldByName(field).String())
+ }
+ lines = append(lines, values)
+ }
+ t := gotabulate.Create(lines)
+ result = t.Render("grid")
+ return result
+}
+
// displayShowResponse dispatches the "show" output based on actionArg:
// "all" lists recent vdiffs, "last" resolves the most recent vdiff's UUID
// from the tablet responses, and anything else is treated as an explicit
// UUID. For "last" and explicit UUIDs a single aggregated summary is shown.
func displayShowResponse(out io.Writer, format, keyspace, workflowName, actionArg string, resp *vtctldatapb.VDiffShowResponse, verbose bool) error {
	var vdiffUUID uuid.UUID
	var err error
	switch actionArg {
	case vdiff.AllActionArg:
		return displayShowRecent(out, format, keyspace, workflowName, actionArg, resp)
	case vdiff.LastActionArg:
		// Take the UUID from the first tablet response only (note the
		// unconditional break). If it does not parse as a UUID there was no
		// previous vdiff for this workflow, which is not an error.
		for _, resp := range resp.TabletResponses {
			vdiffUUID, err = uuid.Parse(resp.VdiffUuid)
			if err != nil {
				if format == "json" {
					fmt.Fprintln(out, "{}")
				} else {
					fmt.Fprintf(out, "No previous vdiff found for %s.%s\n", keyspace, workflowName)
				}
				return nil
			}
			break
		}
		// Deliberate fallthrough: display the summary for the resolved UUID.
		fallthrough
	default:
		if vdiffUUID == uuid.Nil { // Then it must be passed as the action arg
			vdiffUUID, err = uuid.Parse(actionArg)
			if err != nil {
				return err
			}
		}
		if len(resp.TabletResponses) == 0 {
			return fmt.Errorf("no response received for vdiff show of %s.%s (%s)", keyspace, workflowName, vdiffUUID.String())
		}
		_, err = displayShowSingleSummary(out, format, keyspace, workflowName, vdiffUUID.String(), resp, verbose)
		return err
	}
}
+
+func displayShowRecent(out io.Writer, format, keyspace, workflowName, subCommand string, resp *vtctldatapb.VDiffShowResponse) error {
+ output := ""
+ recentListings, err := buildRecentListings(resp)
+ if err != nil {
+ return err
+ }
+ if format == "json" {
+ jsonText, err := cli.MarshalJSONPretty(recentListings)
+ if err != nil {
+ return err
+ }
+ output = string(jsonText)
+ if output == "null" {
+ output = "[]"
+ }
+ } else {
+ output = buildListings(recentListings)
+ if output == "" {
+ output = fmt.Sprintf("No vdiffs found for %s.%s", keyspace, workflowName)
+ }
+ }
+ fmt.Fprintln(out, output)
+ return nil
+}
+
+func buildRecentListings(resp *vtctldatapb.VDiffShowResponse) ([]*listing, error) {
+ var listings []*listing
+ for _, resp := range resp.TabletResponses {
+ if resp != nil && resp.Output != nil {
+ qr := sqltypes.Proto3ToResult(resp.Output)
+ for _, row := range qr.Named().Rows {
+ listings = append(listings, &listing{
+ UUID: row["vdiff_uuid"].ToString(),
+ Workflow: row["workflow"].ToString(),
+ Keyspace: row["keyspace"].ToString(),
+ Shard: row["shard"].ToString(),
+ State: row["state"].ToString(),
+ })
+ }
+ }
+ }
+ return listings, nil
+}
+
+func displayShowSingleSummary(out io.Writer, format, keyspace, workflowName, uuid string, resp *vtctldatapb.VDiffShowResponse, verbose bool) (vdiff.VDiffState, error) {
+ state := vdiff.UnknownState
+ var output string
+ summary, err := buildSingleSummary(keyspace, workflowName, uuid, resp, verbose)
+ if err != nil {
+ return state, err
+ }
+ if summary == nil { // Should never happen
+ return state, fmt.Errorf("no report to show for vdiff %s.%s (%s)", keyspace, workflowName, uuid)
+ }
+ state = summary.State
+ if format == "json" {
+ jsonText, err := cli.MarshalJSONPretty(summary)
+ if err != nil {
+ return state, err
+ }
+ output = string(jsonText)
+ } else {
+ tmpl, err := template.New("summary").Parse(summaryTextTemplate)
+ if err != nil {
+ return state, err
+ }
+ sb := new(strings.Builder)
+ err = tmpl.Execute(sb, summary)
+ if err != nil {
+ return state, err
+ }
+ output = sb.String()
+ for {
+ str := strings.Replace(output, "\n\n", "\n", -1)
+ if output == str {
+ break
+ }
+ output = str
+ }
+ }
+ fmt.Fprintln(out, output)
+ return state, nil
+}
+
// buildSingleSummary folds the per-shard, per-table rows returned for one
// vdiff (resp) into a single summary: global state, row counts, mismatch
// flag, per-table summaries, and per-table/per-shard diff reports. When the
// vdiff had no mismatches and verbose is false, the table/report detail is
// omitted from the result.
func buildSingleSummary(keyspace, workflow, uuid string, resp *vtctldatapb.VDiffShowResponse, verbose bool) (*summary, error) {
	summary := &summary{
		Workflow:     workflow,
		Keyspace:     keyspace,
		UUID:         uuid,
		State:        vdiff.UnknownState,
		RowsCompared: 0,
		StartedAt:    "",
		CompletedAt:  "",
		HasMismatch:  false,
		Shards:       "",
		Reports:      make(map[string]map[string]vdiff.DiffReport),
		Errors:       make(map[string]string),
		Progress:     nil,
	}

	var tableSummaryMap map[string]tableSummary
	var reports map[string]map[string]vdiff.DiffReport
	// Keep a tally of the states across all tables in all shards.
	tableStateCounts := map[vdiff.VDiffState]int{
		vdiff.UnknownState:   0,
		vdiff.PendingState:   0,
		vdiff.StartedState:   0,
		vdiff.StoppedState:   0,
		vdiff.ErrorState:     0,
		vdiff.CompletedState: 0,
	}
	// Keep a tally of the summary states across all shards.
	shardStateCounts := map[vdiff.VDiffState]int{
		vdiff.UnknownState:   0,
		vdiff.PendingState:   0,
		vdiff.StartedState:   0,
		vdiff.StoppedState:   0,
		vdiff.ErrorState:     0,
		vdiff.CompletedState: 0,
	}
	// Keep a tally of the approximate total rows to process as we'll use this for our progress
	// report.
	totalRowsToCompare := int64(0)
	var shards []string
	for shard, resp := range resp.TabletResponses {
		first := true
		if resp != nil && resp.Output != nil {
			shards = append(shards, shard)
			qr := sqltypes.Proto3ToResult(resp.Output)
			// Lazily initialize the maps on the first shard with output.
			if tableSummaryMap == nil {
				tableSummaryMap = make(map[string]tableSummary, 0)
				reports = make(map[string]map[string]vdiff.DiffReport, 0)
			}
			for _, row := range qr.Named().Rows {
				// Update the global VDiff summary based on the per shard level summary.
				// Since these values will be the same for all subsequent rows we only use
				// the first row.
				if first {
					first = false
					// Our timestamps are strings in `2022-06-26 20:43:25` format so we sort
					// them lexicographically.
					// We should use the earliest started_at across all shards.
					if sa := row.AsString("started_at", ""); summary.StartedAt == "" || sa < summary.StartedAt {
						summary.StartedAt = sa
					}
					// And we should use the latest completed_at across all shards.
					if ca := row.AsString("completed_at", ""); summary.CompletedAt == "" || ca > summary.CompletedAt {
						summary.CompletedAt = ca
					}
					// If we had an error on the shard, then let's add that to the summary.
					if le := row.AsString("last_error", ""); le != "" {
						summary.Errors[shard] = le
					}
					// Keep track of how many shards are marked as a specific state. We check
					// this combined with the shard.table states to determine the VDiff summary
					// state.
					shardStateCounts[vdiff.VDiffState(strings.ToLower(row.AsString("vdiff_state", "")))]++
				}

				// Global VDiff summary updates that take into account the per table details
				// per shard.
				{
					summary.RowsCompared += row.AsInt64("rows_compared", 0)
					totalRowsToCompare += row.AsInt64("table_rows", 0)

					// If we had a mismatch on any table on any shard then the global VDiff
					// summary does too.
					if mm, _ := row.ToBool("has_mismatch"); mm {
						summary.HasMismatch = true
					}
				}

				// Table summary information that must be accounted for across all shards.
				{
					table := row.AsString("table_name", "")
					// Create the global VDiff table summary object if it doesn't exist.
					if _, ok := tableSummaryMap[table]; !ok {
						tableSummaryMap[table] = tableSummary{
							TableName: table,
							State:     vdiff.UnknownState,
						}

					}
					ts := tableSummaryMap[table]
					// This is the shard level VDiff table state.
					sts := vdiff.VDiffState(strings.ToLower(row.AsString("table_state", "")))
					tableStateCounts[sts]++

					// The error state must be sticky, and we should not override any other
					// known state with completed.
					switch sts {
					case vdiff.CompletedState:
						if ts.State == vdiff.UnknownState {
							ts.State = sts
						}
					case vdiff.ErrorState:
						ts.State = sts
					default:
						if ts.State != vdiff.ErrorState {
							ts.State = sts
						}
					}

					diffReport := row.AsString("report", "")
					dr := vdiff.DiffReport{}
					if diffReport != "" {
						err := json.Unmarshal([]byte(diffReport), &dr)
						if err != nil {
							return nil, err
						}
						ts.RowsCompared += dr.ProcessedRows
						ts.MismatchedRows += dr.MismatchedRows
						ts.MatchingRows += dr.MatchingRows
						ts.ExtraRowsTarget += dr.ExtraRowsTarget
						ts.ExtraRowsSource += dr.ExtraRowsSource
					}
					if _, ok := reports[table]; !ok {
						reports[table] = make(map[string]vdiff.DiffReport)
					}

					reports[table][shard] = dr
					tableSummaryMap[table] = ts
				}
			}
		}
	}

	// The global VDiff summary should progress from pending->started->completed with
	// stopped for any shard and error for any table being sticky for the global summary.
	// We should only consider the VDiff to be complete if it's completed for every table
	// on every shard.
	if shardStateCounts[vdiff.StoppedState] > 0 {
		summary.State = vdiff.StoppedState
	} else if shardStateCounts[vdiff.ErrorState] > 0 || tableStateCounts[vdiff.ErrorState] > 0 {
		summary.State = vdiff.ErrorState
	} else if tableStateCounts[vdiff.StartedState] > 0 {
		summary.State = vdiff.StartedState
	} else if tableStateCounts[vdiff.PendingState] > 0 {
		summary.State = vdiff.PendingState
	} else if tableStateCounts[vdiff.CompletedState] == (len(tableSummaryMap) * len(shards)) {
		// When doing shard consolidations/merges, we cannot rely solely on the
		// vdiff_table state as there are N sources that we process rows from sequentially
		// with each one writing to the shared _vt.vdiff_table record for the target shard.
		// So we only mark the vdiff for the shard as completed when we've finished
		// processing rows from all of the sources -- which is recorded by marking the
		// vdiff done for the shard by setting _vt.vdiff.state = completed.
		if shardStateCounts[vdiff.CompletedState] == len(shards) {
			summary.State = vdiff.CompletedState
		} else {
			summary.State = vdiff.StartedState
		}
	} else {
		summary.State = vdiff.UnknownState
	}

	// If the vdiff has been started then we can calculate the progress.
	if summary.State == vdiff.StartedState {
		buildProgressReport(summary, totalRowsToCompare)
	}

	sort.Strings(shards) // Sort for predictable output
	summary.Shards = strings.Join(shards, ",")
	summary.TableSummaryMap = tableSummaryMap
	summary.Reports = reports
	if !summary.HasMismatch && !verbose {
		summary.Reports = nil
		summary.TableSummaryMap = nil
	}
	// If we haven't completed the global VDiff then be sure to reflect that with no
	// CompletedAt value.
	if summary.State != vdiff.CompletedState {
		summary.CompletedAt = ""
	}
	return summary, nil
}
+
// buildProgressReport attaches a ProgressReport (percentage complete and,
// when computable, an ETA) to summary, based on rows compared so far versus
// rowsToCompare (the approximate total across all shards/tables).
func buildProgressReport(summary *summary, rowsToCompare int64) {
	report := &vdiff.ProgressReport{}
	if summary.RowsCompared >= 1 {
		// Round to 2 decimal points.
		report.Percentage = math.Round(math.Min((float64(summary.RowsCompared)/float64(rowsToCompare))*100, 100.00)*100) / 100
	}
	// Defensive normalization; with the guards above the division should
	// yield +Inf (capped to 100) rather than NaN, but normalize just in case.
	if math.IsNaN(report.Percentage) {
		report.Percentage = 0
	}
	pctToGo := math.Abs(report.Percentage - 100.00)
	// If StartedAt doesn't parse, startTime is the zero time and runTime is
	// huge, which pushes the ETA past the one-year cap below — so a bad
	// timestamp simply produces no ETA. (Parse error deliberately ignored.)
	startTime, _ := time.Parse(vdiff.TimestampFormat, summary.StartedAt)
	curTime := time.Now().UTC()
	runTime := curTime.Unix() - startTime.Unix()
	// Only estimate once at least 1% is done (avoids division by zero in the
	// integer math below).
	if report.Percentage >= 1 {
		// Calculate how long 1% took, on avg, and multiply that by the % left.
		eta := time.Unix(((int64(runTime)/int64(report.Percentage))*int64(pctToGo))+curTime.Unix(), 1).UTC()
		// Cap the ETA at 1 year out to prevent providing nonsensical ETAs.
		if eta.Before(time.Now().UTC().AddDate(1, 0, 0)) {
			report.ETA = eta.Format(vdiff.TimestampFormat)
		}
	}
	summary.Progress = report
}
+
+func commandShow(cmd *cobra.Command, args []string) error {
+ format, err := common.GetOutputFormat(cmd)
+ if err != nil {
+ return err
+ }
+ cli.FinishedParsing(cmd)
+
+ resp, err := common.GetClient().VDiffShow(common.GetCommandCtx(), &vtctldatapb.VDiffShowRequest{
+ Workflow: common.BaseOptions.Workflow,
+ TargetKeyspace: common.BaseOptions.TargetKeyspace,
+ Arg: showOptions.Arg,
+ })
+
+ if err != nil {
+ return err
+ }
+
+ if err = displayShowResponse(cmd.OutOrStdout(), format, common.BaseOptions.TargetKeyspace, common.BaseOptions.Workflow, showOptions.Arg, resp, showOptions.Verbose); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+func commandStop(cmd *cobra.Command, args []string) error {
+ format, err := common.GetOutputFormat(cmd)
+ if err != nil {
+ return err
+ }
+ cli.FinishedParsing(cmd)
+
+ _, err = common.GetClient().VDiffStop(common.GetCommandCtx(), &vtctldatapb.VDiffStopRequest{
+ Workflow: common.BaseOptions.Workflow,
+ TargetKeyspace: common.BaseOptions.TargetKeyspace,
+ Uuid: stopOptions.UUID.String(),
+ })
+
+ if err != nil {
+ return err
+ }
+
+ displaySimpleResponse(cmd.OutOrStdout(), format, vdiff.StopAction)
+
+ return nil
+}
+
// registerCommands attaches the VDiff command tree to root: the base command
// gets the common vreplication flags, then each subcommand (create, delete,
// resume, show, stop) is registered along with its flags.
func registerCommands(root *cobra.Command) {
	common.AddCommonFlags(base)
	root.AddCommand(base)

	create.Flags().StringSliceVar(&createOptions.SourceCells, "source-cells", nil, "The source cell(s) to compare from; default is any available cell.")
	create.Flags().StringSliceVar(&createOptions.TargetCells, "target-cells", nil, "The target cell(s) to compare with; default is any available cell.")
	create.Flags().Var((*topoprotopb.TabletTypeListFlag)(&createOptions.TabletTypes), "tablet-types", "Tablet types to use on the source and target.")
	// Note: this one flag deliberately binds to the shared
	// common.CreateOptions rather than the local createOptions struct.
	create.Flags().BoolVar(&common.CreateOptions.TabletTypesInPreferenceOrder, "tablet-types-in-preference-order", true, "When performing source tablet selection, look for candidates in the type order as they are listed in the tablet-types flag.")
	create.Flags().DurationVar(&createOptions.FilteredReplicationWaitTime, "filtered-replication-wait-time", 30*time.Second, "Specifies the maximum time to wait, in seconds, for replication to catch up when syncing tablet streams.")
	create.Flags().Uint32Var(&createOptions.Limit, "limit", math.MaxUint32, "Max rows to stop comparing after.")
	create.Flags().BoolVar(&createOptions.DebugQuery, "debug-query", false, "Adds a mysql query to the report that can be used for further debugging.")
	create.Flags().BoolVar(&createOptions.OnlyPKs, "only-pks", false, "When reporting missing rows, only show primary keys in the report.")
	create.Flags().StringSliceVar(&createOptions.Tables, "tables", nil, "Only run vdiff for these tables in the workflow.")
	create.Flags().Uint32Var(&createOptions.MaxExtraRowsToCompare, "max-extra-rows-to-compare", 1000, "If there are collation differences between the source and target, you can have rows that are identical but simply returned in a different order from MySQL. We will do a second pass to compare the rows for any actual differences in this case and this flag allows you to control the resources used for this operation.")
	create.Flags().BoolVar(&createOptions.Wait, "wait", false, "When creating or resuming a vdiff, wait for it to finish before exiting.")
	create.Flags().DurationVar(&createOptions.WaitUpdateInterval, "wait-update-interval", time.Duration(1*time.Minute), "When waiting on a vdiff to finish, check and display the current status this often.")
	create.Flags().BoolVar(&createOptions.AutoRetry, "auto-retry", true, "Should this vdiff automatically retry and continue in case of recoverable errors.")
	base.AddCommand(create)

	base.AddCommand(delete)

	base.AddCommand(resume)

	show.Flags().BoolVar(&showOptions.Verbose, "verbose", false, "Show verbose output in summaries")
	base.AddCommand(show)

	base.AddCommand(stop)
}
+
// init registers the VDiff command tree with the shared vreplication command
// registry, keyed by the canonical command name, so the root vtctldclient
// command picks it up.
func init() {
	common.RegisterCommandHandler("VDiff", registerCommands)
}
diff --git a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go
new file mode 100644
index 00000000000..1a2a374cf81
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go
@@ -0,0 +1,351 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vdiff
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "math/rand"
+ "sync"
+ "testing"
+
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/grpcclient"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/memorytopo"
+ "vitess.io/vitess/go/vt/vtctl/workflow"
+ "vitess.io/vitess/go/vt/vttablet/queryservice"
+ "vitess.io/vitess/go/vt/vttablet/queryservice/fakes"
+ "vitess.io/vitess/go/vt/vttablet/tabletconn"
+ "vitess.io/vitess/go/vt/vttablet/tabletconntest"
+ "vitess.io/vitess/go/vt/vttablet/tmclient"
+
+ binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
+ querypb "vitess.io/vitess/go/vt/proto/query"
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+)
+
+const (
+ // vdiffStopPosition is the default stop position for the target vreplication.
+	// It can be overridden with the positions argument to newTestVDiffEnv.
+ vdiffStopPosition = "MySQL56/d834e6b8-7cbf-11ed-a1eb-0242ac120002:1-892"
+ // vdiffSourceGtid should be the position reported by the source side VStreamResults.
+	// It's expected to be higher than the vdiffStopPosition.
+ vdiffSourceGtid = "MySQL56/d834e6b8-7cbf-11ed-a1eb-0242ac120002:1-893"
+ // vdiffTargetPrimaryPosition is the primary position of the target after
+ // vreplication has been synchronized.
+ vdiffTargetPrimaryPosition = "MySQL56/e34d6fb6-7cbf-11ed-a1eb-0242ac120002:1-892"
+)
+
+type testVDiffEnv struct {
+ ws *workflow.Server
+ sourceKeyspace string
+ targetKeyspace string
+ workflow string
+ topoServ *topo.Server
+ cell string
+ tabletType topodatapb.TabletType
+ tmc *testVDiffTMClient
+ out io.Writer // Capture command output
+
+ mu sync.Mutex
+ tablets map[int]*testVDiffTablet
+}
+
+//----------------------------------------------
+// testVDiffEnv
+
+func newTestVDiffEnv(t testing.TB, ctx context.Context, sourceShards, targetShards []string, query string, positions map[string]string) *testVDiffEnv {
+ env := &testVDiffEnv{
+ sourceKeyspace: "sourceks",
+ targetKeyspace: "targetks",
+ workflow: "vdiffTest",
+ tablets: make(map[int]*testVDiffTablet),
+ topoServ: memorytopo.NewServer(ctx, "cell"),
+ cell: "cell",
+ tabletType: topodatapb.TabletType_REPLICA,
+ tmc: newTestVDiffTMClient(),
+ }
+ env.ws = workflow.NewServer(env.topoServ, env.tmc)
+ env.tmc.testEnv = env
+
+ // Generate a unique dialer name.
+ dialerName := fmt.Sprintf("VDiffTest-%s-%d", t.Name(), rand.Intn(1000000000))
+ tabletconn.RegisterDialer(dialerName, func(tablet *topodatapb.Tablet, failFast grpcclient.FailFast) (queryservice.QueryService, error) {
+ env.mu.Lock()
+ defer env.mu.Unlock()
+ if qs, ok := env.tablets[int(tablet.Alias.Uid)]; ok {
+ return qs, nil
+ }
+ return nil, fmt.Errorf("tablet %d not found", tablet.Alias.Uid)
+ })
+ tabletconntest.SetProtocol("go.cmd.vtctldclient.vreplication.vdiff_env_test", dialerName)
+
+ tabletID := 100
+ for _, shard := range sourceShards {
+ _ = env.addTablet(tabletID, env.sourceKeyspace, shard, topodatapb.TabletType_PRIMARY)
+ env.tmc.waitpos[tabletID+1] = vdiffStopPosition
+
+ tabletID += 10
+ }
+ tabletID = 200
+ for _, shard := range targetShards {
+ primary := env.addTablet(tabletID, env.targetKeyspace, shard, topodatapb.TabletType_PRIMARY)
+
+ var rows []string
+ var posRows []string
+ for j, sourceShard := range sourceShards {
+ bls := &binlogdatapb.BinlogSource{
+ Keyspace: env.sourceKeyspace,
+ Shard: sourceShard,
+ Filter: &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{{
+ Match: "t1",
+ Filter: query,
+ }},
+ },
+ }
+ rows = append(rows, fmt.Sprintf("%d|%v|||", j+1, bls))
+ position := vdiffStopPosition
+ if pos := positions[sourceShard+shard]; pos != "" {
+ position = pos
+ }
+ posRows = append(posRows, fmt.Sprintf("%v|%s", bls, position))
+
+ // vdiff.syncTargets. This actually happens after stopTargets.
+ // But this is one statement per stream.
+ env.tmc.setVRResults(
+ primary.tablet,
+ fmt.Sprintf("update _vt.vreplication set state='Running', stop_pos='%s', message='synchronizing for vdiff' where id=%d", vdiffSourceGtid, j+1),
+ &sqltypes.Result{},
+ )
+ }
+ // migrater buildMigrationTargets
+ env.tmc.setVRResults(
+ primary.tablet,
+ "select id, source, message, cell, tablet_types, workflow_type, workflow_sub_type, defer_secondary_keys from _vt.vreplication where workflow='vdiffTest' and db_name='vt_target'",
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "id|source|message|cell|tablet_types|workflow_type|workflow_sub_type|defer_secondary_keys",
+ "int64|varchar|varchar|varchar|varchar|int64|int64|int64"),
+ rows...,
+ ),
+ )
+
+ // vdiff.stopTargets
+ env.tmc.setVRResults(primary.tablet, "update _vt.vreplication set state='Stopped', message='for vdiff' where db_name='vt_target' and workflow='vdiffTest'", &sqltypes.Result{})
+ env.tmc.setVRResults(
+ primary.tablet,
+ "select source, pos from _vt.vreplication where db_name='vt_target' and workflow='vdiffTest'",
+ sqltypes.MakeTestResult(sqltypes.MakeTestFields(
+ "source|pos",
+ "varchar|varchar"),
+ posRows...,
+ ),
+ )
+
+ // vdiff.syncTargets (continued)
+ env.tmc.vrpos[tabletID] = vdiffSourceGtid
+ env.tmc.pos[tabletID] = vdiffTargetPrimaryPosition
+
+ // vdiff.startQueryStreams
+ env.tmc.waitpos[tabletID+1] = vdiffTargetPrimaryPosition
+
+ // vdiff.restartTargets
+ env.tmc.setVRResults(primary.tablet, "update _vt.vreplication set state='Running', message='', stop_pos='' where db_name='vt_target' and workflow='vdiffTest'", &sqltypes.Result{})
+
+ tabletID += 10
+ }
+ env.resetOutput()
+ return env
+}
+
+func (env *testVDiffEnv) getOutput() string {
+ env.mu.Lock()
+ defer env.mu.Unlock()
+ bb, ok := env.out.(*bytes.Buffer)
+ if !ok {
+ panic(fmt.Sprintf("unexpected output type for test env: %T", env.out))
+ }
+ return bb.String()
+}
+
+func (env *testVDiffEnv) resetOutput() {
+ env.mu.Lock()
+ defer env.mu.Unlock()
+ env.out = &bytes.Buffer{}
+}
+
+func (env *testVDiffEnv) close() {
+ env.mu.Lock()
+ defer env.mu.Unlock()
+ for _, t := range env.tablets {
+ _ = env.topoServ.DeleteTablet(context.Background(), t.tablet.Alias)
+ }
+ env.tablets = nil
+ env.topoServ.Close()
+ env.ws = nil
+}
+
+func (env *testVDiffEnv) addTablet(id int, keyspace, shard string, tabletType topodatapb.TabletType) *testVDiffTablet {
+ env.mu.Lock()
+ defer env.mu.Unlock()
+ tablet := &topodatapb.Tablet{
+ Alias: &topodatapb.TabletAlias{
+ Cell: env.cell,
+ Uid: uint32(id),
+ },
+ Keyspace: keyspace,
+ Shard: shard,
+ KeyRange: &topodatapb.KeyRange{},
+ Type: tabletType,
+ PortMap: map[string]int32{
+ "test": int32(id),
+ },
+ }
+ env.tablets[id] = newTestVDiffTablet(tablet)
+ if err := env.topoServ.InitTablet(context.Background(), tablet, false /* allowPrimaryOverride */, true /* createShardAndKeyspace */, false /* allowUpdate */); err != nil {
+ panic(err)
+ }
+ if tabletType == topodatapb.TabletType_PRIMARY {
+ _, err := env.topoServ.UpdateShardFields(context.Background(), keyspace, shard, func(si *topo.ShardInfo) error {
+ si.PrimaryAlias = tablet.Alias
+ return nil
+ })
+ if err != nil {
+ panic(err)
+ }
+ }
+ return env.tablets[id]
+}
+
+//----------------------------------------------
+// testVDiffTablet
+
+type testVDiffTablet struct {
+ queryservice.QueryService
+ tablet *topodatapb.Tablet
+}
+
+func newTestVDiffTablet(tablet *topodatapb.Tablet) *testVDiffTablet {
+ return &testVDiffTablet{
+ QueryService: fakes.ErrorQueryService,
+ tablet: tablet,
+ }
+}
+
+func (tvt *testVDiffTablet) StreamHealth(ctx context.Context, callback func(*querypb.StreamHealthResponse) error) error {
+ return callback(&querypb.StreamHealthResponse{
+ Serving: true,
+ Target: &querypb.Target{
+ Keyspace: tvt.tablet.Keyspace,
+ Shard: tvt.tablet.Shard,
+ TabletType: tvt.tablet.Type,
+ },
+ RealtimeStats: &querypb.RealtimeStats{},
+ })
+}
+
+//----------------------------------------------
+// testVDiffTMClient
+
+type testVDiffTMClient struct {
+ tmclient.TabletManagerClient
+ vrQueries map[int]map[string]*querypb.QueryResult
+ vdRequests map[int]map[string]*tabletmanagerdatapb.VDiffResponse
+ waitpos map[int]string
+ vrpos map[int]string
+ pos map[int]string
+
+ testEnv *testVDiffEnv // For access to the test environment
+}
+
+func newTestVDiffTMClient() *testVDiffTMClient {
+ return &testVDiffTMClient{
+ vrQueries: make(map[int]map[string]*querypb.QueryResult),
+ vdRequests: make(map[int]map[string]*tabletmanagerdatapb.VDiffResponse),
+ waitpos: make(map[int]string),
+ vrpos: make(map[int]string),
+ pos: make(map[int]string),
+ }
+}
+
+func (tmc *testVDiffTMClient) setVRResults(tablet *topodatapb.Tablet, query string, result *sqltypes.Result) {
+ queries, ok := tmc.vrQueries[int(tablet.Alias.Uid)]
+ if !ok {
+ queries = make(map[string]*querypb.QueryResult)
+ tmc.vrQueries[int(tablet.Alias.Uid)] = queries
+ }
+ queries[query] = sqltypes.ResultToProto3(result)
+}
+
+func (tmc *testVDiffTMClient) VReplicationExec(ctx context.Context, tablet *topodatapb.Tablet, query string) (*querypb.QueryResult, error) {
+ result, ok := tmc.vrQueries[int(tablet.Alias.Uid)][query]
+ if !ok {
+ return nil, fmt.Errorf("query %q not found for tablet %d", query, tablet.Alias.Uid)
+ }
+ return result, nil
+}
+
+func (tmc *testVDiffTMClient) setVDResults(tablet *topodatapb.Tablet, req *tabletmanagerdatapb.VDiffRequest, res *tabletmanagerdatapb.VDiffResponse) {
+ reqs, ok := tmc.vdRequests[int(tablet.Alias.Uid)]
+ if !ok {
+ reqs = make(map[string]*tabletmanagerdatapb.VDiffResponse)
+ tmc.vdRequests[int(tablet.Alias.Uid)] = reqs
+ }
+ reqs[req.VdiffUuid] = res
+}
+
+func (tmc *testVDiffTMClient) VDiff(ctx context.Context, tablet *topodatapb.Tablet, req *tabletmanagerdatapb.VDiffRequest) (*tabletmanagerdatapb.VDiffResponse, error) {
+ resp, ok := tmc.vdRequests[int(tablet.Alias.Uid)][req.VdiffUuid]
+ if !ok {
+ return nil, fmt.Errorf("request %+v not found for tablet %d", req, tablet.Alias.Uid)
+ }
+ return resp, nil
+}
+
+func (tmc *testVDiffTMClient) ReadVReplicationWorkflow(ctx context.Context, tablet *topodatapb.Tablet, request *tabletmanagerdatapb.ReadVReplicationWorkflowRequest) (*tabletmanagerdatapb.ReadVReplicationWorkflowResponse, error) {
+ id := int32(1)
+ resp := &tabletmanagerdatapb.ReadVReplicationWorkflowResponse{
+ Workflow: "vdiffTest",
+ }
+
+ sourceShards, _ := tmc.testEnv.topoServ.GetShardNames(ctx, tmc.testEnv.sourceKeyspace)
+ streams := make([]*tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream, 0, len(sourceShards))
+ for _, shard := range sourceShards {
+ streams = append(streams, &tabletmanagerdatapb.ReadVReplicationWorkflowResponse_Stream{
+ Id: id,
+ Bls: &binlogdatapb.BinlogSource{
+ Keyspace: tmc.testEnv.sourceKeyspace,
+ Shard: shard,
+ Filter: &binlogdatapb.Filter{
+ Rules: []*binlogdatapb.Rule{
+ {
+ Match: ".*",
+ },
+ },
+ },
+ },
+ })
+ id++
+ }
+ resp.Streams = streams
+
+ return resp, nil
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_test.go b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_test.go
new file mode 100644
index 00000000000..fd535bb2aad
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_test.go
@@ -0,0 +1,530 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vdiff
+
+import (
+ "context"
+ "fmt"
+ "math"
+ "testing"
+ "time"
+
+ "github.com/google/uuid"
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/sqltypes"
+ "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff"
+
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var (
+ fields = sqltypes.MakeTestFields(
+ "vdiff_state|last_error|table_name|uuid|table_state|table_rows|started_at|rows_compared|completed_at|has_mismatch|report",
+ "varbinary|varbinary|varbinary|varchar|varbinary|int64|timestamp|int64|timestamp|int64|json",
+ )
+ options = &tabletmanagerdatapb.VDiffOptions{
+ PickerOptions: &tabletmanagerdatapb.VDiffPickerOptions{
+ TabletTypes: "primary",
+ },
+ CoreOptions: &tabletmanagerdatapb.VDiffCoreOptions{
+ Tables: "t1",
+ },
+ ReportOptions: &tabletmanagerdatapb.VDiffReportOptions{
+ Format: "json",
+ },
+ }
+)
+
+func TestVDiffUnsharded(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ env := newTestVDiffEnv(t, ctx, []string{"0"}, []string{"0"}, "", nil)
+ defer env.close()
+
+ now := time.Now()
+ UUID := uuid.New().String()
+ req := &tabletmanagerdatapb.VDiffRequest{
+ Keyspace: env.targetKeyspace,
+ Workflow: env.workflow,
+ Action: string(vdiff.ShowAction),
+ ActionArg: UUID,
+ }
+ starttime := now.UTC().Format(vdiff.TimestampFormat)
+ comptime := now.Add(1 * time.Second).UTC().Format(vdiff.TimestampFormat)
+ goodReportfmt := `{
+ "Workflow": "vdiffTest",
+ "Keyspace": "%s",
+ "State": "completed",
+ "UUID": "%s",
+ "RowsCompared": %d,
+ "HasMismatch": %t,
+ "Shards": "0",
+ "StartedAt": "%s",
+ "CompletedAt": "%s"
+}
+`
+
+ badReportfmt := `{
+ "Workflow": "vdiffTest",
+ "Keyspace": "%s",
+ "State": "completed",
+ "UUID": "%s",
+ "RowsCompared": %d,
+ "HasMismatch": %t,
+ "Shards": "0",
+ "StartedAt": "%s",
+ "CompletedAt": "%s",
+ "TableSummary": {
+ "t1": {
+ "TableName": "t1",
+ "State": "completed",
+ "RowsCompared": %d,
+ "MatchingRows": %d,
+ "MismatchedRows": %d,
+ "ExtraRowsSource": %d,
+ "ExtraRowsTarget": %d
+ }
+ },
+ "Reports": {
+ "t1": {
+ "0": {
+ "TableName": "t1",
+ "ProcessedRows": %d,
+ "MatchingRows": %d,
+ "MismatchedRows": %d,
+ "ExtraRowsSource": %d,
+ "ExtraRowsTarget": %d,
+ %s
+ }
+ }
+ }
+}
+`
+
+ testcases := []struct {
+ id string
+ result *sqltypes.Result
+ report string
+ }{{
+ id: "1",
+ result: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|0|"+
+ `{"TableName": "t1", "MatchingRows": 3, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 0, `+
+ `"ExtraRowsTarget": 0}`),
+ report: fmt.Sprintf(goodReportfmt,
+ env.targetKeyspace, UUID, 3, false, starttime, comptime,
+ ),
+ }, {
+ id: "2",
+ result: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+
+ `{"TableName": "t1", "MatchingRows": 1, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 0, `+
+ `"ExtraRowsTarget": 2, "ExtraRowsTargetSample": [{"Row": {"c1": "2", "c2": "4"}}]}`),
+ report: fmt.Sprintf(badReportfmt,
+ env.targetKeyspace, UUID, 3, true, starttime, comptime, 3, 1, 0, 0, 2, 3, 1, 0, 0, 2,
+ `"ExtraRowsTargetSample": [
+ {
+ "Row": {
+ "c1": "2",
+ "c2": "4"
+ }
+ }
+ ]`),
+ }, {
+ id: "3",
+ result: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+
+ `{"TableName": "t1", "MatchingRows": 1, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 2, `+
+ `"ExtraRowsTarget": 0, "ExtraRowsSourceSample": [{"Row": {"c1": "2", "c2": "4"}}]}`),
+ report: fmt.Sprintf(badReportfmt,
+ env.targetKeyspace, UUID, 3, true, starttime, comptime, 3, 1, 0, 2, 0, 3, 1, 0, 2, 0,
+ `"ExtraRowsSourceSample": [
+ {
+ "Row": {
+ "c1": "2",
+ "c2": "4"
+ }
+ }
+ ]`),
+ }, {
+ id: "4",
+ result: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+
+ `{"TableName": "t1", "MatchingRows": 2, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 1, `+
+ `"ExtraRowsTarget": 0, "ExtraRowsSourceSample": [{"Row": {"c1": "2", "c2": "4"}}]}`),
+ report: fmt.Sprintf(badReportfmt,
+ env.targetKeyspace, UUID, 3, true, starttime, comptime, 3, 2, 0, 1, 0, 3, 2, 0, 1, 0,
+ `"ExtraRowsSourceSample": [
+ {
+ "Row": {
+ "c1": "2",
+ "c2": "4"
+ }
+ }
+ ]`),
+ }, {
+ id: "5",
+ result: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+
+ `{"TableName": "t1", "MatchingRows": 2, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 1, `+
+ `"ExtraRowsTarget": 0, "ExtraRowsSourceSample": [{"Row": {"c1": "2", "c2": "4"}}]}`),
+ report: fmt.Sprintf(badReportfmt,
+ env.targetKeyspace, UUID, 3, true, starttime, comptime, 3, 2, 0, 1, 0, 3, 2, 0, 1, 0,
+ `"ExtraRowsSourceSample": [
+ {
+ "Row": {
+ "c1": "2",
+ "c2": "4"
+ }
+ }
+ ]`),
+ }, {
+ id: "6",
+ result: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+
+ `{"TableName": "t1", "MatchingRows": 2, "ProcessedRows": 3, "MismatchedRows": 1, "ExtraRowsSource": 0, `+
+ `"ExtraRowsTarget": 0, "MismatchedRowsSample": [{"Source": {"Row": {"c1": "2", "c2": "3"}}, `+
+ `"Target": {"Row": {"c1": "2", "c2": "4"}}}]}`),
+ report: fmt.Sprintf(badReportfmt,
+ env.targetKeyspace, UUID, 3, true, starttime, comptime, 3, 2, 1, 0, 0, 3, 2, 1, 0, 0,
+ `"MismatchedRowsSample": [
+ {
+ "Source": {
+ "Row": {
+ "c1": "2",
+ "c2": "3"
+ }
+ },
+ "Target": {
+ "Row": {
+ "c1": "2",
+ "c2": "4"
+ }
+ }
+ }
+ ]`),
+ }, {
+ id: "7", // --only_pks
+ result: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+
+ `{"TableName": "t1", "MatchingRows": 2, "ProcessedRows": 3, "MismatchedRows": 1, "ExtraRowsSource": 0, `+
+ `"ExtraRowsTarget": 0, "MismatchedRowsSample": [{"Source": {"Row": {"c1": "2"}}, `+
+ `"Target": {"Row": {"c1": "2"}}}]}`),
+ report: fmt.Sprintf(badReportfmt,
+ env.targetKeyspace, UUID, 3, true, starttime, comptime, 3, 2, 1, 0, 0, 3, 2, 1, 0, 0,
+ `"MismatchedRowsSample": [
+ {
+ "Source": {
+ "Row": {
+ "c1": "2"
+ }
+ },
+ "Target": {
+ "Row": {
+ "c1": "2"
+ }
+ }
+ }
+ ]`),
+ }, {
+ id: "8", // --debug_query
+ result: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|1|"+
+ `{"TableName": "t1", "MatchingRows": 2, "ProcessedRows": 3, "MismatchedRows": 1, "ExtraRowsSource": 0, `+
+ `"ExtraRowsTarget": 0, "MismatchedRowsSample": [{"Source": {"Row": {"c1": "2", "c2": "3"}, "Query": "select c1, c2 from t1 where c1=2;"}, `+
+ `"Target": {"Row": {"c1": "2", "c2": "4"}, "Query": "select c1, c2 from t1 where c1=2;"}}]}`),
+ report: fmt.Sprintf(badReportfmt,
+ env.targetKeyspace, UUID, 3, true, starttime, comptime, 3, 2, 1, 0, 0, 3, 2, 1, 0, 0,
+ `"MismatchedRowsSample": [
+ {
+ "Source": {
+ "Row": {
+ "c1": "2",
+ "c2": "3"
+ },
+ "Query": "select c1, c2 from t1 where c1=2;"
+ },
+ "Target": {
+ "Row": {
+ "c1": "2",
+ "c2": "4"
+ },
+ "Query": "select c1, c2 from t1 where c1=2;"
+ }
+ }
+ ]`),
+ },
+ }
+
+ for _, tcase := range testcases {
+ t.Run(tcase.id, func(t *testing.T) {
+ res := &tabletmanagerdatapb.VDiffResponse{
+ Id: 1,
+ Output: sqltypes.ResultToProto3(tcase.result),
+ }
+ env.tmc.setVDResults(env.tablets[200].tablet, req, res)
+ req := &vtctldatapb.VDiffShowRequest{
+ TargetKeyspace: env.targetKeyspace,
+ Workflow: env.workflow,
+ Arg: UUID,
+ }
+
+ resp, err := env.ws.VDiffShow(context.Background(), req)
+ require.NoError(t, err)
+ vds, err := displayShowSingleSummary(env.out, options.ReportOptions.Format, env.targetKeyspace, env.workflow, UUID, resp, false)
+ require.NoError(t, err)
+ require.Equal(t, vdiff.CompletedState, vds)
+
+ require.Equal(t, tcase.report, env.getOutput())
+ env.resetOutput()
+ })
+ }
+}
+
+func TestVDiffSharded(t *testing.T) {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ env := newTestVDiffEnv(t, ctx, []string{"-40", "40-"}, []string{"-80", "80-"}, "", map[string]string{
+ "-80": "MySQL56/0e45e704-7cb9-11ed-a1eb-0242ac120002:1-890",
+ "80-": "MySQL56/1497ddb0-7cb9-11ed-a1eb-0242ac120002:1-891",
+ })
+ defer env.close()
+
+ now := time.Now()
+ UUID := uuid.New().String()
+ req := &tabletmanagerdatapb.VDiffRequest{
+ Keyspace: env.targetKeyspace,
+ Workflow: env.workflow,
+ Action: string(vdiff.ShowAction),
+ ActionArg: UUID,
+ }
+ starttime := now.UTC().Format(vdiff.TimestampFormat)
+ comptime := now.Add(1 * time.Second).UTC().Format(vdiff.TimestampFormat)
+ verbosefmt := `{
+ "Workflow": "vdiffTest",
+ "Keyspace": "%s",
+ "State": "completed",
+ "UUID": "%s",
+ "RowsCompared": %d,
+ "HasMismatch": %t,
+ "Shards": "-80,80-",
+ "StartedAt": "%s",
+ "CompletedAt": "%s",
+ "TableSummary": {
+ "t1": {
+ "TableName": "t1",
+ "State": "completed",
+ "RowsCompared": %d,
+ "MatchingRows": %d,
+ "MismatchedRows": %d,
+ "ExtraRowsSource": %d,
+ "ExtraRowsTarget": %d
+ }
+ },
+ "Reports": {
+ "t1": {
+ "-80": {
+ "TableName": "t1",
+ "ProcessedRows": %d,
+ "MatchingRows": %d,
+ "MismatchedRows": %d,
+ "ExtraRowsSource": %d,
+ "ExtraRowsTarget": %d
+ },
+ "80-": {
+ "TableName": "t1",
+ "ProcessedRows": %d,
+ "MatchingRows": %d,
+ "MismatchedRows": %d,
+ "ExtraRowsSource": %d,
+ "ExtraRowsTarget": %d
+ }
+ }
+ }
+}
+`
+
+ testcases := []struct {
+ id string
+ shard1Res *sqltypes.Result
+ shard2Res *sqltypes.Result
+ report string
+ }{{
+ id: "1",
+ shard1Res: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|0|"+
+ `{"TableName": "t1", "MatchingRows": 3, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 0, `+
+ `"ExtraRowsTarget": 0}`),
+ shard2Res: sqltypes.MakeTestResult(fields,
+ "completed||t1|"+UUID+"|completed|3|"+starttime+"|3|"+comptime+"|0|"+
+ `{"TableName": "t1", "MatchingRows": 3, "ProcessedRows": 3, "MismatchedRows": 0, "ExtraRowsSource": 0, `+
+ `"ExtraRowsTarget": 0}`),
+ report: fmt.Sprintf(verbosefmt,
+ env.targetKeyspace, UUID, 6, false, starttime, comptime, 6, 6, 0, 0, 0, 3, 3, 0, 0, 0, 3, 3, 0, 0, 0,
+ ),
+ }}
+
+ for _, tcase := range testcases {
+ t.Run(tcase.id, func(t *testing.T) {
+ shard1Res := &tabletmanagerdatapb.VDiffResponse{
+ Id: 1,
+ Output: sqltypes.ResultToProto3(tcase.shard1Res),
+ }
+ shard2Res := &tabletmanagerdatapb.VDiffResponse{
+ Id: 1,
+ Output: sqltypes.ResultToProto3(tcase.shard2Res),
+ }
+ env.tmc.setVDResults(env.tablets[200].tablet, req, shard1Res)
+ env.tmc.setVDResults(env.tablets[210].tablet, req, shard2Res)
+ req := &vtctldatapb.VDiffShowRequest{
+ TargetKeyspace: env.targetKeyspace,
+ Workflow: env.workflow,
+ Arg: UUID,
+ }
+
+ resp, err := env.ws.VDiffShow(context.Background(), req)
+ require.NoError(t, err)
+ vds, err := displayShowSingleSummary(env.out, options.ReportOptions.Format, env.targetKeyspace, env.workflow, UUID, resp, true)
+ require.NoError(t, err)
+ require.Equal(t, vdiff.CompletedState, vds)
+
+ require.Equal(t, tcase.report, env.getOutput())
+ env.resetOutput()
+ })
+ }
+}
+
+func TestGetStructNames(t *testing.T) {
+ type s struct {
+ A string
+ B int64
+ }
+ got := getStructFieldNames(s{})
+ want := []string{"A", "B"}
+ require.EqualValues(t, want, got)
+}
+
+func TestBuildProgressReport(t *testing.T) {
+ now := time.Now()
+ type args struct {
+ summary *summary
+ rowsToCompare int64
+ }
+ tests := []struct {
+ name string
+ args args
+ want *vdiff.ProgressReport
+ }{
+ {
+ name: "no progress",
+ args: args{
+ summary: &summary{RowsCompared: 0},
+ rowsToCompare: 100,
+ },
+ want: &vdiff.ProgressReport{
+ Percentage: 0,
+ ETA: "", // no ETA
+ },
+ },
+ {
+ name: "one third of the way",
+ args: args{
+ summary: &summary{
+ RowsCompared: 33,
+ StartedAt: now.Add(-10 * time.Second).UTC().Format(vdiff.TimestampFormat),
+ },
+ rowsToCompare: 100,
+ },
+ want: &vdiff.ProgressReport{
+ Percentage: 33,
+ ETA: now.Add(20 * time.Second).UTC().Format(vdiff.TimestampFormat),
+ },
+ },
+ {
+ name: "half way",
+ args: args{
+ summary: &summary{
+ RowsCompared: 5000000000,
+ StartedAt: now.Add(-10 * time.Hour).UTC().Format(vdiff.TimestampFormat),
+ },
+ rowsToCompare: 10000000000,
+ },
+ want: &vdiff.ProgressReport{
+ Percentage: 50,
+ ETA: now.Add(10 * time.Hour).UTC().Format(vdiff.TimestampFormat),
+ },
+ },
+ {
+ name: "full progress",
+ args: args{
+ summary: &summary{
+ RowsCompared: 100,
+ CompletedAt: now.UTC().Format(vdiff.TimestampFormat),
+ },
+ rowsToCompare: 100,
+ },
+ want: &vdiff.ProgressReport{
+ Percentage: 100,
+ ETA: now.UTC().Format(vdiff.TimestampFormat),
+ },
+ },
+ {
+ name: "more than in I_S",
+ args: args{
+ summary: &summary{
+ RowsCompared: 100,
+ CompletedAt: now.UTC().Format(vdiff.TimestampFormat),
+ },
+ rowsToCompare: 50,
+ },
+ want: &vdiff.ProgressReport{
+ Percentage: 100,
+ ETA: now.UTC().Format(vdiff.TimestampFormat),
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ buildProgressReport(tt.args.summary, tt.args.rowsToCompare)
+ // We always check the percentage
+ require.Equal(t, int(tt.want.Percentage), int(tt.args.summary.Progress.Percentage))
+
+ // We only check the ETA if there is one.
+ if tt.want.ETA != "" {
+ // Let's check that we're within 1 second to avoid flakes.
+ wantTime, err := time.Parse(vdiff.TimestampFormat, tt.want.ETA)
+ require.NoError(t, err)
+ var timeDiff float64
+ if tt.want.Percentage == 100 {
+ completedTime, err := time.Parse(vdiff.TimestampFormat, tt.args.summary.CompletedAt)
+ require.NoError(t, err)
+ timeDiff = math.Abs(completedTime.Sub(wantTime).Seconds())
+ } else {
+ startTime, err := time.Parse(vdiff.TimestampFormat, tt.args.summary.StartedAt)
+ require.NoError(t, err)
+ completedTimeUnix := float64(now.UTC().Unix()-startTime.UTC().Unix()) * (100 / tt.want.Percentage)
+ estimatedTime, err := time.Parse(vdiff.TimestampFormat, tt.want.ETA)
+ require.NoError(t, err)
+ timeDiff = math.Abs(estimatedTime.Sub(startTime).Seconds() - completedTimeUnix)
+ }
+ require.LessOrEqual(t, timeDiff, 1.0)
+ }
+ })
+ }
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/delete.go b/go/cmd/vtctldclient/command/vreplication/workflow/delete.go
new file mode 100644
index 00000000000..4eae8076fec
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/workflow/delete.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workflow
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var (
+ deleteOptions = struct {
+ KeepData bool
+ KeepRoutingRules bool
+ }{}
+
+ // delete makes a WorkflowDelete gRPC call to a vtctld.
+ delete = &cobra.Command{
+ Use: "delete",
+ Short: "Delete a VReplication workflow.",
+ Example: `vtctldclient --server localhost:15999 workflow --keyspace customer delete --workflow commerce2customer`,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Delete"},
+ Args: cobra.NoArgs,
+ RunE: commandDelete,
+ }
+)
+
+func commandDelete(cmd *cobra.Command, args []string) error {
+ cli.FinishedParsing(cmd)
+
+ req := &vtctldatapb.WorkflowDeleteRequest{
+ Keyspace: baseOptions.Keyspace,
+ Workflow: baseOptions.Workflow,
+ KeepData: deleteOptions.KeepData,
+ KeepRoutingRules: deleteOptions.KeepRoutingRules,
+ }
+ resp, err := common.GetClient().WorkflowDelete(common.GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+
+ // Sort the inner TabletInfo slice for deterministic output.
+ sort.Slice(resp.Details, func(i, j int) bool {
+ return resp.Details[i].Tablet.String() < resp.Details[j].Tablet.String()
+ })
+
+ data, err := cli.MarshalJSONPretty(resp)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+
+ return nil
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/get.go b/go/cmd/vtctldclient/command/vreplication/workflow/get.go
new file mode 100644
index 00000000000..69acc535158
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/workflow/get.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workflow
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var (
+ getWorkflowsOptions = struct {
+ ShowAll bool
+ }{}
+ // GetWorkflows makes a GetWorkflows gRPC call to a vtctld.
+ getWorkflows = &cobra.Command{
+ Use: "GetWorkflows ",
+ Short: "Gets all vreplication workflows (Reshard, MoveTables, etc) in the given keyspace.",
+ DisableFlagsInUseLine: true,
+ Args: cobra.ExactArgs(1),
+ RunE: commandGetWorkflows,
+ }
+)
+
+func commandGetWorkflows(cmd *cobra.Command, args []string) error {
+ cli.FinishedParsing(cmd)
+
+ ks := cmd.Flags().Arg(0)
+
+ resp, err := common.GetClient().GetWorkflows(common.GetCommandCtx(), &vtctldatapb.GetWorkflowsRequest{
+ Keyspace: ks,
+ ActiveOnly: !getWorkflowsOptions.ShowAll,
+ IncludeLogs: workflowShowOptions.IncludeLogs,
+ })
+
+ if err != nil {
+ return err
+ }
+
+ data, err := cli.MarshalJSONPretty(resp)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+
+ return nil
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/show.go b/go/cmd/vtctldclient/command/vreplication/workflow/show.go
new file mode 100644
index 00000000000..ebc18ea250d
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/workflow/show.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workflow
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var (
+	// workflowList makes a GetWorkflows gRPC call to a vtctld.
+ workflowList = &cobra.Command{
+ Use: "list",
+ Short: "List the VReplication workflows in the given keyspace.",
+ Example: `vtctldclient --server localhost:15999 workflow --keyspace customer list`,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"List"},
+ Args: cobra.NoArgs,
+ RunE: commandShow,
+ }
+
+ // show makes a GetWorkflows gRPC call to a vtctld.
+ show = &cobra.Command{
+ Use: "show",
+ Short: "Show the details for a VReplication workflow.",
+ Example: `vtctldclient --server localhost:15999 workflow --keyspace customer show --workflow commerce2customer`,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Show"},
+ Args: cobra.NoArgs,
+ RunE: commandShow,
+ }
+)
+
+func commandShow(cmd *cobra.Command, args []string) error {
+ cli.FinishedParsing(cmd)
+
+ req := &vtctldatapb.GetWorkflowsRequest{
+ Keyspace: baseOptions.Keyspace,
+ Workflow: baseOptions.Workflow,
+ IncludeLogs: workflowShowOptions.IncludeLogs,
+ }
+ resp, err := common.GetClient().GetWorkflows(common.GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+
+ var data []byte
+ if strings.ToLower(cmd.Name()) == "list" {
+ // We only want the names.
+ Names := make([]string, len(resp.Workflows))
+ for i, wf := range resp.Workflows {
+ Names[i] = wf.Name
+ }
+ data, err = cli.MarshalJSONPretty(Names)
+ } else {
+ data, err = cli.MarshalJSONPretty(resp)
+ }
+ if err != nil {
+ return err
+ }
+ fmt.Println(string(data))
+
+ return nil
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/state.go b/go/cmd/vtctldclient/command/vreplication/workflow/state.go
new file mode 100644
index 00000000000..89e75312ab2
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/workflow/state.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workflow
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+ "vitess.io/vitess/go/textutil"
+
+ binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var (
+	// start makes a WorkflowUpdate gRPC call to a vtctld.
+ start = &cobra.Command{
+ Use: "start",
+ Short: "Start a VReplication workflow.",
+ Example: `vtctldclient --server localhost:15999 workflow --keyspace customer start --workflow commerce2customer`,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Start"},
+ Args: cobra.NoArgs,
+ RunE: commandUpdateState,
+ }
+
+	// stop makes a WorkflowUpdate gRPC call to a vtctld.
+ stop = &cobra.Command{
+ Use: "stop",
+ Short: "Stop a VReplication workflow.",
+ Example: `vtctldclient --server localhost:15999 workflow --keyspace customer stop --workflow commerce2customer`,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Stop"},
+ Args: cobra.NoArgs,
+ RunE: commandUpdateState,
+ }
+)
+
+func commandUpdateState(cmd *cobra.Command, args []string) error {
+ cli.FinishedParsing(cmd)
+
+ var state binlogdatapb.VReplicationWorkflowState
+ switch strings.ToLower(cmd.Name()) {
+ case "start":
+ if err := common.CanRestartWorkflow(baseOptions.Keyspace, baseOptions.Workflow); err != nil {
+ return err
+ }
+ state = binlogdatapb.VReplicationWorkflowState_Running
+ case "stop":
+ state = binlogdatapb.VReplicationWorkflowState_Stopped
+ default:
+ return fmt.Errorf("invalid workflow state: %s", args[0])
+ }
+
+ // The only thing we're updating is the state.
+ req := &vtctldatapb.WorkflowUpdateRequest{
+ Keyspace: baseOptions.Keyspace,
+ TabletRequest: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{
+ Workflow: baseOptions.Workflow,
+ Cells: textutil.SimulatedNullStringSlice,
+ TabletTypes: []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)},
+ OnDdl: binlogdatapb.OnDDLAction(textutil.SimulatedNullInt),
+ State: state,
+ },
+ }
+
+ resp, err := common.GetClient().WorkflowUpdate(common.GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+
+ // Sort the inner TabletInfo slice for deterministic output.
+ sort.Slice(resp.Details, func(i, j int) bool {
+ return resp.Details[i].Tablet.String() < resp.Details[j].Tablet.String()
+ })
+
+ data, err := cli.MarshalJSONPretty(resp)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+
+ return nil
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/update.go b/go/cmd/vtctldclient/command/vreplication/workflow/update.go
new file mode 100644
index 00000000000..466d81e8be4
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/workflow/update.go
@@ -0,0 +1,135 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workflow
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/cli"
+ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+ "vitess.io/vitess/go/textutil"
+
+ binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
+ tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
+)
+
+var (
+ updateOptions = struct {
+ Cells []string
+ TabletTypes []topodatapb.TabletType
+ TabletTypesInPreferenceOrder bool
+ OnDDL string
+ }{}
+
+ // update makes a WorkflowUpdate gRPC call to a vtctld.
+ update = &cobra.Command{
+ Use: "update",
+ Short: "Update the configuration parameters for a VReplication workflow.",
+ Example: `vtctldclient --server localhost:15999 workflow --keyspace customer update --workflow commerce2customer --cells zone1 --cells zone2 -c "zone3,zone4" -c zone5`,
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"Update"},
+ Args: cobra.NoArgs,
+ PreRunE: func(cmd *cobra.Command, args []string) error {
+ changes := false
+ if cmd.Flags().Lookup("cells").Changed { // Validate the provided value(s)
+ changes = true
+ for i, cell := range updateOptions.Cells { // Which only means trimming whitespace
+ updateOptions.Cells[i] = strings.TrimSpace(cell)
+ }
+ } else {
+ updateOptions.Cells = textutil.SimulatedNullStringSlice
+ }
+ if cmd.Flags().Lookup("tablet-types").Changed {
+ if err := common.ParseTabletTypes(cmd); err != nil {
+ return err
+ }
+ changes = true
+ } else {
+ updateOptions.TabletTypes = []topodatapb.TabletType{topodatapb.TabletType(textutil.SimulatedNullInt)}
+ }
+ if cmd.Flags().Lookup("on-ddl").Changed { // Validate the provided value
+ changes = true
+ if _, ok := binlogdatapb.OnDDLAction_value[strings.ToUpper(updateOptions.OnDDL)]; !ok {
+ return fmt.Errorf("invalid on-ddl value: %s", updateOptions.OnDDL)
+ }
+ } // Simulated NULL will need to be handled in command
+ if !changes {
+ return fmt.Errorf("no configuration options specified to update")
+ }
+ return nil
+ },
+ RunE: commandUpdate,
+ }
+)
+
+func commandUpdate(cmd *cobra.Command, args []string) error {
+ cli.FinishedParsing(cmd)
+
+ // We've already validated any provided value, if one WAS provided.
+ // Now we need to do the mapping from the string representation to
+ // the enum value.
+ onddl := int32(textutil.SimulatedNullInt) // Simulated NULL when no value provided
+ if val, ok := binlogdatapb.OnDDLAction_value[strings.ToUpper(updateOptions.OnDDL)]; ok {
+ onddl = val
+ }
+
+ // Simulated NULL when no value is provided.
+ tsp := tabletmanagerdatapb.TabletSelectionPreference_UNKNOWN
+ if cmd.Flags().Lookup("tablet-types-in-order").Changed {
+ if updateOptions.TabletTypesInPreferenceOrder {
+ tsp = tabletmanagerdatapb.TabletSelectionPreference_INORDER
+ } else {
+ tsp = tabletmanagerdatapb.TabletSelectionPreference_ANY
+ }
+ }
+
+ req := &vtctldatapb.WorkflowUpdateRequest{
+ Keyspace: baseOptions.Keyspace,
+ TabletRequest: &tabletmanagerdatapb.UpdateVReplicationWorkflowRequest{
+ Workflow: baseOptions.Workflow,
+ Cells: updateOptions.Cells,
+ TabletTypes: updateOptions.TabletTypes,
+ TabletSelectionPreference: tsp,
+ OnDdl: binlogdatapb.OnDDLAction(onddl),
+ },
+ }
+
+ resp, err := common.GetClient().WorkflowUpdate(common.GetCommandCtx(), req)
+ if err != nil {
+ return err
+ }
+
+ // Sort the inner TabletInfo slice for deterministic output.
+ sort.Slice(resp.Details, func(i, j int) bool {
+ return resp.Details[i].Tablet.String() < resp.Details[j].Tablet.String()
+ })
+
+ data, err := cli.MarshalJSONPretty(resp)
+ if err != nil {
+ return err
+ }
+
+ fmt.Printf("%s\n", data)
+
+ return nil
+}
diff --git a/go/cmd/vtctldclient/command/vreplication/workflow/workflow.go b/go/cmd/vtctldclient/command/vreplication/workflow/workflow.go
new file mode 100644
index 00000000000..e552b61d476
--- /dev/null
+++ b/go/cmd/vtctldclient/command/vreplication/workflow/workflow.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workflow
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common"
+ "vitess.io/vitess/go/vt/topo/topoproto"
+)
+
+var (
+ // base is a parent command for Workflow commands.
+ base = &cobra.Command{
+		Use:                   "Workflow --keyspace <keyspace> [command] [command-flags]",
+ Short: "Administer VReplication workflows (Reshard, MoveTables, etc) in the given keyspace.",
+ DisableFlagsInUseLine: true,
+ Aliases: []string{"workflow"},
+ Args: cobra.ExactArgs(1),
+ RunE: commandGetWorkflows,
+ }
+)
+
+var (
+ baseOptions = struct {
+ Keyspace string
+ Workflow string
+ }{}
+
+ workflowShowOptions = struct {
+ IncludeLogs bool
+ }{}
+)
+
+func registerCommands(root *cobra.Command) {
+ base.PersistentFlags().StringVarP(&baseOptions.Keyspace, "keyspace", "k", "", "Keyspace context for the workflow.")
+ base.MarkPersistentFlagRequired("keyspace")
+ root.AddCommand(base)
+
+ getWorkflows.Flags().BoolVar(&workflowShowOptions.IncludeLogs, "include-logs", true, "Include recent logs for the workflows.")
+ getWorkflows.Flags().BoolVarP(&getWorkflowsOptions.ShowAll, "show-all", "a", false, "Show all workflows instead of just active workflows.")
+ root.AddCommand(getWorkflows) // Yes this is supposed to be root as GetWorkflows is a top-level command.
+
+ delete.Flags().StringVarP(&baseOptions.Workflow, "workflow", "w", "", "The workflow you want to delete.")
+ delete.MarkFlagRequired("workflow")
+ delete.Flags().BoolVar(&deleteOptions.KeepData, "keep-data", false, "Keep the partially copied table data from the workflow in the target keyspace.")
+ delete.Flags().BoolVar(&deleteOptions.KeepRoutingRules, "keep-routing-rules", false, "Keep the routing rules created for the workflow.")
+ base.AddCommand(delete)
+
+ base.AddCommand(workflowList)
+
+ show.Flags().StringVarP(&baseOptions.Workflow, "workflow", "w", "", "The workflow you want the details for.")
+ show.MarkFlagRequired("workflow")
+ show.Flags().BoolVar(&workflowShowOptions.IncludeLogs, "include-logs", true, "Include recent logs for the workflow.")
+ base.AddCommand(show)
+
+ start.Flags().StringVarP(&baseOptions.Workflow, "workflow", "w", "", "The workflow you want to start.")
+ start.MarkFlagRequired("workflow")
+ base.AddCommand(start)
+
+ stop.Flags().StringVarP(&baseOptions.Workflow, "workflow", "w", "", "The workflow you want to stop.")
+ stop.MarkFlagRequired("workflow")
+ base.AddCommand(stop)
+
+ update.Flags().StringVarP(&baseOptions.Workflow, "workflow", "w", "", "The workflow you want to update.")
+ update.MarkFlagRequired("workflow")
+ update.Flags().StringSliceVarP(&updateOptions.Cells, "cells", "c", nil, "New Cell(s) or CellAlias(es) (comma-separated) to replicate from.")
+ update.Flags().VarP((*topoproto.TabletTypeListFlag)(&updateOptions.TabletTypes), "tablet-types", "t", "New source tablet types to replicate from (e.g. PRIMARY,REPLICA,RDONLY).")
+ update.Flags().BoolVar(&updateOptions.TabletTypesInPreferenceOrder, "tablet-types-in-order", true, "When performing source tablet selection, look for candidates in the type order as they are listed in the tablet-types flag.")
+ update.Flags().StringVar(&updateOptions.OnDDL, "on-ddl", "", "New instruction on what to do when DDL is encountered in the VReplication stream. Possible values are IGNORE, STOP, EXEC, and EXEC_IGNORE.")
+ base.AddCommand(update)
+}
+
+func init() {
+ common.RegisterCommandHandler("Workflow", registerCommands)
+}
diff --git a/go/cmd/vtctldclient/command/workflows.go b/go/cmd/vtctldclient/command/workflows.go
deleted file mode 100644
index f783ce9c307..00000000000
--- a/go/cmd/vtctldclient/command/workflows.go
+++ /dev/null
@@ -1,193 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package command
-
-import (
- "fmt"
- "sort"
- "strings"
-
- "github.com/spf13/cobra"
-
- "vitess.io/vitess/go/cmd/vtctldclient/cli"
- "vitess.io/vitess/go/textutil"
- "vitess.io/vitess/go/vt/topo/topoproto"
-
- binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
- tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata"
- vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata"
-)
-
-var (
- // GetWorkflows makes a GetWorkflows gRPC call to a vtctld.
- GetWorkflows = &cobra.Command{
- Use: "GetWorkflows ",
- Short: "Gets all vreplication workflows (Reshard, MoveTables, etc) in the given keyspace.",
- DisableFlagsInUseLine: true,
- Args: cobra.ExactArgs(1),
- RunE: commandGetWorkflows,
- }
-
- // Workflow is a parent command for Workflow* sub commands.
- Workflow = &cobra.Command{
- Use: "workflow",
- Short: "Administer VReplication workflows (Reshard, MoveTables, etc) in the given keyspace",
- DisableFlagsInUseLine: true,
- Aliases: []string{"Workflow"},
- Args: cobra.ExactArgs(1),
- RunE: commandGetWorkflows,
- }
-
- // WorkflowUpdate makes a WorkflowUpdate gRPC call to a vtctld.
- WorkflowUpdate = &cobra.Command{
- Use: "update",
- Short: "Update the configuration parameters for a VReplication workflow",
- Example: `vtctldclient --server=localhost:15999 workflow --keyspace=customer update --workflow=commerce2customer --cells "zone1" --cells "zone2" -c "zone3,zone4" -c "zone5"`,
- DisableFlagsInUseLine: true,
- Aliases: []string{"Update"},
- Args: cobra.NoArgs,
- PreRunE: func(cmd *cobra.Command, args []string) error {
- changes := false
- if cmd.Flags().Lookup("cells").Changed { // Validate the provided value(s)
- changes = true
- for i, cell := range workflowUpdateOptions.Cells { // Which only means trimming whitespace
- workflowUpdateOptions.Cells[i] = strings.TrimSpace(cell)
- }
- } else {
- workflowUpdateOptions.Cells = textutil.SimulatedNullStringSlice
- }
- if cmd.Flags().Lookup("tablet-types").Changed { // Validate the provided value(s)
- changes = true
- for i, tabletType := range workflowUpdateOptions.TabletTypes {
- workflowUpdateOptions.TabletTypes[i] = strings.ToUpper(strings.TrimSpace(tabletType))
- if _, err := topoproto.ParseTabletType(workflowUpdateOptions.TabletTypes[i]); err != nil {
- return err
- }
- }
- } else {
- workflowUpdateOptions.TabletTypes = textutil.SimulatedNullStringSlice
- }
- if cmd.Flags().Lookup("on-ddl").Changed { // Validate the provided value
- changes = true
- if _, ok := binlogdatapb.OnDDLAction_value[strings.ToUpper(workflowUpdateOptions.OnDDL)]; !ok {
- return fmt.Errorf("invalid on-ddl value: %s", workflowUpdateOptions.OnDDL)
- }
- } // Simulated NULL will need to be handled in command
- if !changes {
- return fmt.Errorf("no configuration options specified to update")
- }
- return nil
- },
- RunE: commandWorkflowUpdate,
- }
-)
-
-var getWorkflowsOptions = struct {
- ShowAll bool
-}{}
-
-func commandGetWorkflows(cmd *cobra.Command, args []string) error {
- cli.FinishedParsing(cmd)
-
- ks := cmd.Flags().Arg(0)
-
- resp, err := client.GetWorkflows(commandCtx, &vtctldatapb.GetWorkflowsRequest{
- Keyspace: ks,
- ActiveOnly: !getWorkflowsOptions.ShowAll,
- })
-
- if err != nil {
- return err
- }
-
- data, err := cli.MarshalJSON(resp)
- if err != nil {
- return err
- }
-
- fmt.Printf("%s\n", data)
-
- return nil
-}
-
-var (
- workflowOptions = struct {
- Keyspace string
- }{}
- workflowUpdateOptions = struct {
- Workflow string
- Cells []string
- TabletTypes []string
- OnDDL string
- }{}
-)
-
-func commandWorkflowUpdate(cmd *cobra.Command, args []string) error {
- cli.FinishedParsing(cmd)
-
- // We've already validated any provided value, if one WAS provided.
- // Now we need to do the mapping from the string representation to
- // the enum value.
- onddl := int32(textutil.SimulatedNullInt) // Simulated NULL when no value provided
- if val, ok := binlogdatapb.OnDDLAction_value[strings.ToUpper(workflowUpdateOptions.OnDDL)]; ok {
- onddl = val
- }
-
- req := &vtctldatapb.WorkflowUpdateRequest{
- Keyspace: workflowOptions.Keyspace,
- TabletRequest: &tabletmanagerdatapb.UpdateVRWorkflowRequest{
- Workflow: workflowUpdateOptions.Workflow,
- Cells: workflowUpdateOptions.Cells,
- TabletTypes: workflowUpdateOptions.TabletTypes,
- OnDdl: binlogdatapb.OnDDLAction(onddl),
- },
- }
-
- resp, err := client.WorkflowUpdate(commandCtx, req)
- if err != nil {
- return err
- }
-
- // Sort the inner TabletInfo slice for deterministic output.
- sort.Slice(resp.Details, func(i, j int) bool {
- return resp.Details[i].Tablet < resp.Details[j].Tablet
- })
-
- data, err := cli.MarshalJSON(resp)
- if err != nil {
- return err
- }
-
- fmt.Printf("%s\n", data)
-
- return nil
-}
-
-func init() {
- GetWorkflows.Flags().BoolVarP(&getWorkflowsOptions.ShowAll, "show-all", "a", false, "Show all workflows instead of just active workflows.")
- Root.AddCommand(GetWorkflows)
-
- Workflow.PersistentFlags().StringVarP(&workflowOptions.Keyspace, "keyspace", "k", "", "Keyspace context for the workflow (required)")
- Workflow.MarkPersistentFlagRequired("keyspace")
- Root.AddCommand(Workflow)
- WorkflowUpdate.Flags().StringVarP(&workflowUpdateOptions.Workflow, "workflow", "w", "", "The workflow you want to update (required)")
- WorkflowUpdate.MarkFlagRequired("workflow")
- WorkflowUpdate.Flags().StringSliceVarP(&workflowUpdateOptions.Cells, "cells", "c", nil, "New Cell(s) or CellAlias(es) (comma-separated) to replicate from")
- WorkflowUpdate.Flags().StringSliceVarP(&workflowUpdateOptions.TabletTypes, "tablet-types", "t", nil, "New source tablet types to replicate from (e.g. PRIMARY,REPLICA,RDONLY)")
- WorkflowUpdate.Flags().StringVar(&workflowUpdateOptions.OnDDL, "on-ddl", "", "New instruction on what to do when DDL is encountered in the VReplication stream. Possible values are IGNORE, STOP, EXEC, and EXEC_IGNORE")
- Workflow.AddCommand(WorkflowUpdate)
-}
diff --git a/go/cmd/vtexplain/cli/vtexplain.go b/go/cmd/vtexplain/cli/vtexplain.go
new file mode 100644
index 00000000000..8b0622cf8a3
--- /dev/null
+++ b/go/cmd/vtexplain/cli/vtexplain.go
@@ -0,0 +1,196 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import (
+ "context"
+ "fmt"
+ "os"
+
+ "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/vtexplain"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+
+ "github.com/spf13/cobra"
+
+ querypb "vitess.io/vitess/go/vt/proto/query"
+)
+
+var (
+ sqlFlag string
+ sqlFileFlag string
+ schemaFlag string
+ schemaFileFlag string
+ vschemaFlag string
+ vschemaFileFlag string
+ ksShardMapFlag string
+ ksShardMapFileFlag string
+ normalize bool
+ dbName string
+ plannerVersionStr string
+
+ numShards = 2
+ replicationMode = "ROW"
+ executionMode = "multi"
+ outputMode = "text"
+
+ Main = &cobra.Command{
+ Use: "vtexplain",
+ Short: "vtexplain is a command line tool which provides information on how Vitess plans to execute a particular query.",
+ Long: `vtexplain is a command line tool which provides information on how Vitess plans to execute a particular query.
+
+It can be used to validate queries for compatibility with Vitess.
+
+For a user guide that describes how to use the vtexplain tool to explain how Vitess executes a particular SQL statement, see Analyzing a SQL statement.
+
+## Limitations
+
+### The VSchema must use a keyspace name.
+
+VTExplain requires a keyspace name for each keyspace in an input VSchema:
+` +
+ "```\n" +
+ `"keyspace_name": {
+ "_comment": "Keyspace definition goes here."
+}
+` + "```" + `
+
+If no keyspace name is present, VTExplain will return the following error:
+` +
+ "```\n" +
+ `ERROR: initVtgateExecutor: json: cannot unmarshal bool into Go value of type map[string]json.RawMessage
+` + "```\n",
+ Example: "Explain how Vitess will execute the query `SELECT * FROM users` using the VSchema contained in `vschemas.json` and the database schema `schema.sql`:\n\n" +
+ "```\nvtexplain --vschema-file vschema.json --schema-file schema.sql --sql \"SELECT * FROM users\"\n```\n\n" +
+
+ "Explain how the example will execute on 128 shards using Row-based replication:\n\n" +
+
+ "```\nvtexplain -- -shards 128 --vschema-file vschema.json --schema-file schema.sql --replication-mode \"ROW\" --output-mode text --sql \"INSERT INTO users (user_id, name) VALUES(1, 'john')\"\n```\n",
+ Args: cobra.NoArgs,
+ PreRunE: servenv.CobraPreRunE,
+ RunE: run,
+ }
+)
+
+func init() {
+ servenv.MoveFlagsToCobraCommand(Main)
+ Main.Flags().StringVar(&sqlFlag, "sql", sqlFlag, "A list of semicolon-delimited SQL commands to analyze")
+ Main.Flags().StringVar(&sqlFileFlag, "sql-file", sqlFileFlag, "Identifies the file that contains the SQL commands to analyze")
+ Main.Flags().StringVar(&schemaFlag, "schema", schemaFlag, "The SQL table schema")
+ Main.Flags().StringVar(&schemaFileFlag, "schema-file", schemaFileFlag, "Identifies the file that contains the SQL table schema")
+ Main.Flags().StringVar(&vschemaFlag, "vschema", vschemaFlag, "Identifies the VTGate routing schema")
+ Main.Flags().StringVar(&vschemaFileFlag, "vschema-file", vschemaFileFlag, "Identifies the VTGate routing schema file")
+ Main.Flags().StringVar(&ksShardMapFlag, "ks-shard-map", ksShardMapFlag, "JSON map of keyspace name -> shard name -> ShardReference object. The inner map is the same as the output of FindAllShardsInKeyspace")
+ Main.Flags().StringVar(&ksShardMapFileFlag, "ks-shard-map-file", ksShardMapFileFlag, "File containing json blob of keyspace name -> shard name -> ShardReference object")
+ Main.Flags().StringVar(&replicationMode, "replication-mode", replicationMode, "The replication mode to simulate -- must be set to either ROW or STATEMENT")
+ Main.Flags().BoolVar(&normalize, "normalize", normalize, "Whether to enable vtgate normalization")
+ Main.Flags().StringVar(&dbName, "dbname", dbName, "Optional database target to override normal routing")
+ Main.Flags().StringVar(&plannerVersionStr, "planner-version", plannerVersionStr, "Sets the default planner to use. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right")
+ Main.Flags().IntVar(&numShards, "shards", numShards, "Number of shards per keyspace. Passing --ks-shard-map/--ks-shard-map-file causes this flag to be ignored.")
+ Main.Flags().StringVar(&executionMode, "execution-mode", executionMode, "The execution mode to simulate -- must be set to multi, legacy-autocommit, or twopc")
+ Main.Flags().StringVar(&outputMode, "output-mode", outputMode, "Output in human-friendly text or json")
+
+ acl.RegisterFlags(Main.Flags())
+}
+
+// getFileParam returns the value of flag if it is not "",
+// or else the content of the file named by flagFile.
+func getFileParam(flag, flagFile, name string, required bool) (string, error) {
+ if flag != "" {
+ if flagFile != "" {
+ return "", fmt.Errorf("action requires only one of %v or %v-file", name, name)
+ }
+ return flag, nil
+ }
+
+ if flagFile == "" {
+ if required {
+ return "", fmt.Errorf("action requires one of %v or %v-file", name, name)
+ }
+
+ return "", nil
+ }
+ data, err := os.ReadFile(flagFile)
+ if err != nil {
+ return "", fmt.Errorf("cannot read file %v: %v", flagFile, err)
+ }
+ return string(data), nil
+}
+
+func run(cmd *cobra.Command, args []string) error {
+ defer logutil.Flush()
+
+ servenv.Init()
+ return parseAndRun()
+}
+
+func parseAndRun() error {
+ plannerVersion, _ := plancontext.PlannerNameToVersion(plannerVersionStr)
+ if plannerVersionStr != "" && plannerVersion != querypb.ExecuteOptions_Gen4 {
+ return fmt.Errorf("invalid value specified for planner-version of '%s' -- valid value is Gen4 or an empty value to use the default planner", plannerVersionStr)
+ }
+
+ sql, err := getFileParam(sqlFlag, sqlFileFlag, "sql", true)
+ if err != nil {
+ return err
+ }
+
+ schema, err := getFileParam(schemaFlag, schemaFileFlag, "schema", true)
+ if err != nil {
+ return err
+ }
+
+ vschema, err := getFileParam(vschemaFlag, vschemaFileFlag, "vschema", true)
+ if err != nil {
+ return err
+ }
+
+ ksShardMap, err := getFileParam(ksShardMapFlag, ksShardMapFileFlag, "ks-shard-map", false)
+ if err != nil {
+ return err
+ }
+
+ opts := &vtexplain.Options{
+ ExecutionMode: executionMode,
+ PlannerVersion: plannerVersion,
+ ReplicationMode: replicationMode,
+ NumShards: numShards,
+ Normalize: normalize,
+ Target: dbName,
+ }
+
+ vte, err := vtexplain.Init(context.Background(), vschema, schema, ksShardMap, opts)
+ if err != nil {
+ return err
+ }
+ defer vte.Stop()
+
+ plans, err := vte.Run(sql)
+ if err != nil {
+ return err
+ }
+
+ if outputMode == "text" {
+ fmt.Print(vte.ExplainsAsText(plans))
+ } else {
+ fmt.Print(vtexplain.ExplainsAsJSON(plans))
+ }
+
+ return nil
+}
diff --git a/go/cmd/vtexplain/docgen/main.go b/go/cmd/vtexplain/docgen/main.go
new file mode 100644
index 00000000000..15ea92b53bb
--- /dev/null
+++ b/go/cmd/vtexplain/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/vtexplain/cli"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+		Use: "docgen [-d <dir>]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(cli.Main, dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/vtexplain/vtexplain.go b/go/cmd/vtexplain/vtexplain.go
index d5f60a893ba..37774076382 100644
--- a/go/cmd/vtexplain/vtexplain.go
+++ b/go/cmd/vtexplain/vtexplain.go
@@ -18,151 +18,16 @@ package main
import (
"fmt"
- "os"
- "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/cmd/vtexplain/cli"
"vitess.io/vitess/go/exit"
- "vitess.io/vitess/go/vt/logutil"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/vtexplain"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
-
- "github.com/spf13/pflag"
-
- querypb "vitess.io/vitess/go/vt/proto/query"
-)
-
-var (
- sqlFlag string
- sqlFileFlag string
- schemaFlag string
- schemaFileFlag string
- vschemaFlag string
- vschemaFileFlag string
- ksShardMapFlag string
- ksShardMapFileFlag string
- normalize bool
- dbName string
- plannerVersionStr string
-
- numShards = 2
- replicationMode = "ROW"
- executionMode = "multi"
- outputMode = "text"
)
-func registerFlags(fs *pflag.FlagSet) {
- fs.StringVar(&sqlFlag, "sql", sqlFlag, "A list of semicolon-delimited SQL commands to analyze")
- fs.StringVar(&sqlFileFlag, "sql-file", sqlFileFlag, "Identifies the file that contains the SQL commands to analyze")
- fs.StringVar(&schemaFlag, "schema", schemaFlag, "The SQL table schema")
- fs.StringVar(&schemaFileFlag, "schema-file", schemaFileFlag, "Identifies the file that contains the SQL table schema")
- fs.StringVar(&vschemaFlag, "vschema", vschemaFlag, "Identifies the VTGate routing schema")
- fs.StringVar(&vschemaFileFlag, "vschema-file", vschemaFileFlag, "Identifies the VTGate routing schema file")
- fs.StringVar(&ksShardMapFlag, "ks-shard-map", ksShardMapFlag, "JSON map of keyspace name -> shard name -> ShardReference object. The inner map is the same as the output of FindAllShardsInKeyspace")
- fs.StringVar(&ksShardMapFileFlag, "ks-shard-map-file", ksShardMapFileFlag, "File containing json blob of keyspace name -> shard name -> ShardReference object")
- fs.StringVar(&replicationMode, "replication-mode", replicationMode, "The replication mode to simulate -- must be set to either ROW or STATEMENT")
- fs.BoolVar(&normalize, "normalize", normalize, "Whether to enable vtgate normalization")
- fs.StringVar(&dbName, "dbname", dbName, "Optional database target to override normal routing")
- fs.StringVar(&plannerVersionStr, "planner-version", plannerVersionStr, "Sets the query planner version to use when generating the explain output. Valid values are V3 and Gen4. An empty value will use VTGate's default planner")
- fs.IntVar(&numShards, "shards", numShards, "Number of shards per keyspace. Passing --ks-shard-map/--ks-shard-map-file causes this flag to be ignored.")
- fs.StringVar(&executionMode, "execution-mode", executionMode, "The execution mode to simulate -- must be set to multi, legacy-autocommit, or twopc")
- fs.StringVar(&outputMode, "output-mode", outputMode, "Output in human-friendly text or json")
-
- acl.RegisterFlags(fs)
-}
-
-func init() {
- servenv.OnParse(registerFlags)
-}
-
-// getFileParam returns a string containing either flag is not "",
-// or the content of the file named flagFile
-func getFileParam(flag, flagFile, name string, required bool) (string, error) {
- if flag != "" {
- if flagFile != "" {
- return "", fmt.Errorf("action requires only one of %v or %v-file", name, name)
- }
- return flag, nil
- }
-
- if flagFile == "" {
- if required {
- return "", fmt.Errorf("action requires one of %v or %v-file", name, name)
- }
-
- return "", nil
- }
- data, err := os.ReadFile(flagFile)
- if err != nil {
- return "", fmt.Errorf("cannot read file %v: %v", flagFile, err)
- }
- return string(data), nil
-}
-
func main() {
defer exit.RecoverAll()
- defer logutil.Flush()
- servenv.ParseFlags("vtexplain")
- servenv.Init()
- err := parseAndRun()
- if err != nil {
+ if err := cli.Main.Execute(); err != nil {
fmt.Printf("ERROR: %s\n", err)
exit.Return(1)
}
}
-
-func parseAndRun() error {
- plannerVersion, _ := plancontext.PlannerNameToVersion(plannerVersionStr)
- if plannerVersionStr != "" && plannerVersion != querypb.ExecuteOptions_V3 && plannerVersion != querypb.ExecuteOptions_Gen4 {
- return fmt.Errorf("invalid value specified for planner-version of '%s' -- valid values are V3 and Gen4 or an empty value to use the default planner", plannerVersionStr)
- }
-
- sql, err := getFileParam(sqlFlag, sqlFileFlag, "sql", true)
- if err != nil {
- return err
- }
-
- schema, err := getFileParam(schemaFlag, schemaFileFlag, "schema", true)
- if err != nil {
- return err
- }
-
- vschema, err := getFileParam(vschemaFlag, vschemaFileFlag, "vschema", true)
- if err != nil {
- return err
- }
-
- ksShardMap, err := getFileParam(ksShardMapFlag, ksShardMapFileFlag, "ks-shard-map", false)
- if err != nil {
- return err
- }
-
- opts := &vtexplain.Options{
- ExecutionMode: executionMode,
- PlannerVersion: plannerVersion,
- ReplicationMode: replicationMode,
- NumShards: numShards,
- Normalize: normalize,
- Target: dbName,
- }
-
- vte, err := vtexplain.Init(vschema, schema, ksShardMap, opts)
- if err != nil {
- return err
- }
- defer vte.Stop()
-
- plans, err := vte.Run(sql)
- if err != nil {
- return err
- }
-
- if outputMode == "text" {
- fmt.Print(vte.ExplainsAsText(plans))
- } else {
- fmt.Print(vtexplain.ExplainsAsJSON(plans))
- }
-
- return nil
-}
diff --git a/go/cmd/vtgate/cli/cli.go b/go/cmd/vtgate/cli/cli.go
new file mode 100644
index 00000000000..9182bfcf9a4
--- /dev/null
+++ b/go/cmd/vtgate/cli/cli.go
@@ -0,0 +1,192 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/exit"
+ "vitess.io/vitess/go/vt/discovery"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/srvtopo"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/topoproto"
+ "vitess.io/vitess/go/vt/vterrors"
+ "vitess.io/vitess/go/vt/vtgate"
+ "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+ "vitess.io/vitess/go/vt/proto/vtrpc"
+)
+
+var (
+ cell string
+ tabletTypesToWait []topodatapb.TabletType
+ plannerName string
+ resilientServer *srvtopo.ResilientServer
+
+ Main = &cobra.Command{
+ Use: "vtgate",
+ Short: "VTGate is a stateless proxy responsible for accepting requests from applications and routing them to the appropriate tablet server(s) for query execution. It speaks both the MySQL Protocol and a gRPC protocol.",
+ Long: `VTGate is a stateless proxy responsible for accepting requests from applications and routing them to the appropriate tablet server(s) for query execution. It speaks both the MySQL Protocol and a gRPC protocol.
+
+### Key Options
+` +
+ "\n* `--srv_topo_cache_ttl`: There may be instances where you will need to increase the cached TTL from the default of 1 second to a higher number:\n" +
+ ` * You may want to increase this option if you see that your topo leader goes down and keeps your queries waiting for a few seconds.`,
+ Example: `vtgate \
+ --topo_implementation etcd2 \
+ --topo_global_server_address localhost:2379 \
+ --topo_global_root /vitess/global \
+ --log_dir $VTDATAROOT/tmp \
+ --port 15001 \
+ --grpc_port 15991 \
+ --mysql_server_port 15306 \
+ --cell test \
+ --cells_to_watch test \
+ --tablet_types_to_wait PRIMARY,REPLICA \
+ --service_map 'grpc-vtgateservice' \
+ --pid_file $VTDATAROOT/tmp/vtgate.pid \
+ --mysql_auth_server_impl none`,
+ Args: cobra.NoArgs,
+ Version: servenv.AppVersion.String(),
+ PreRunE: servenv.CobraPreRunE,
+ RunE: run,
+ }
+)
+
+// CheckCellFlags will check validation of cell and cells_to_watch flag
+// it will help to avoid strange behaviors when vtgate runs but actually does not work
+func CheckCellFlags(ctx context.Context, serv srvtopo.Server, cell string, cellsToWatch string) error {
+ // topo check
+ var topoServer *topo.Server
+ if serv != nil {
+ var err error
+ topoServer, err = serv.GetTopoServer()
+ if err != nil {
+ return fmt.Errorf("Unable to create gateway: %w", err)
+ }
+ } else {
+ return fmt.Errorf("topo server cannot be nil")
+ }
+ cellsInTopo, err := topoServer.GetKnownCells(ctx)
+ if err != nil {
+ return err
+ }
+ if len(cellsInTopo) == 0 {
+ return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "topo server should have at least one cell")
+ }
+
+ // cell valid check
+ if cell == "" {
+ return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell flag must be set")
+ }
+ hasCell := false
+ for _, v := range cellsInTopo {
+ if v == cell {
+ hasCell = true
+ break
+ }
+ }
+ if !hasCell {
+ return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell:[%v] does not exist in topo", cell)
+ }
+
+ // cells_to_watch valid check
+ cells := make([]string, 0, 1)
+ for _, c := range strings.Split(cellsToWatch, ",") {
+ if c == "" {
+ continue
+ }
+ // cell should be contained in cellsInTopo
+ if exists := topo.InCellList(c, cellsInTopo); !exists {
+ return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell: [%v] is not valid. Available cells: [%v]", c, strings.Join(cellsInTopo, ","))
+ }
+ cells = append(cells, c)
+ }
+ if len(cells) == 0 {
+ return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cells_to_watch flag cannot be empty")
+ }
+
+ return nil
+}
+
+func run(cmd *cobra.Command, args []string) error {
+ defer exit.Recover()
+
+ servenv.Init()
+
+ ts := topo.Open()
+ defer ts.Close()
+
+ resilientServer = srvtopo.NewResilientServer(context.Background(), ts, "ResilientSrvTopoServer")
+
+ tabletTypes := make([]topodatapb.TabletType, 0, 1)
+ for _, tt := range tabletTypesToWait {
+ if topoproto.IsServingType(tt) {
+ tabletTypes = append(tabletTypes, tt)
+ }
+ }
+
+ if len(tabletTypes) == 0 {
+ return fmt.Errorf("tablet_types_to_wait must contain at least one serving tablet type")
+ }
+
+ err := CheckCellFlags(context.Background(), resilientServer, cell, vtgate.CellsToWatch)
+ if err != nil {
+ return fmt.Errorf("cells_to_watch validation failed: %v", err)
+ }
+
+ plannerVersion, _ := plancontext.PlannerNameToVersion(plannerName)
+
+ // pass nil for HealthCheck and it will be created
+ vtg := vtgate.Init(context.Background(), nil, resilientServer, cell, tabletTypes, plannerVersion)
+
+ servenv.OnRun(func() {
+ // Flags are parsed now. Parse the template using the actual flag value and overwrite the current template.
+ discovery.ParseTabletURLTemplateFromFlag()
+ addStatusParts(vtg)
+ })
+ servenv.OnClose(func() {
+ _ = vtg.Gateway().Close(context.Background())
+ })
+ servenv.RunDefault()
+
+ return nil
+}
+
+func init() {
+ servenv.RegisterDefaultFlags()
+ servenv.RegisterFlags()
+ servenv.RegisterGRPCServerFlags()
+ servenv.RegisterGRPCServerAuthFlags()
+ servenv.RegisterServiceMapFlag()
+
+ servenv.MoveFlagsToCobraCommand(Main)
+
+ acl.RegisterFlags(Main.Flags())
+ Main.Flags().StringVar(&cell, "cell", cell, "cell to use")
+ Main.Flags().Var((*topoproto.TabletTypeListFlag)(&tabletTypesToWait), "tablet_types_to_wait", "Wait till connected for specified tablet types during Gateway initialization. Should be provided as a comma-separated set of tablet types.")
+ Main.Flags().StringVar(&plannerName, "planner-version", plannerName, "Sets the default planner to use when the session has not changed it. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right")
+
+ Main.MarkFlagRequired("tablet_types_to_wait")
+}
diff --git a/go/cmd/vtgate/plugin_auth_clientcert.go b/go/cmd/vtgate/cli/plugin_auth_clientcert.go
similarity index 98%
rename from go/cmd/vtgate/plugin_auth_clientcert.go
rename to go/cmd/vtgate/cli/plugin_auth_clientcert.go
index 4f3d65ef626..1a1334e71ba 100644
--- a/go/cmd/vtgate/plugin_auth_clientcert.go
+++ b/go/cmd/vtgate/cli/plugin_auth_clientcert.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports clientcert to register the client certificate implementation of AuthServer.
diff --git a/go/cmd/vtgate/plugin_auth_ldap.go b/go/cmd/vtgate/cli/plugin_auth_ldap.go
similarity index 98%
rename from go/cmd/vtgate/plugin_auth_ldap.go
rename to go/cmd/vtgate/cli/plugin_auth_ldap.go
index 257f0742733..7dc5b246f72 100644
--- a/go/cmd/vtgate/plugin_auth_ldap.go
+++ b/go/cmd/vtgate/cli/plugin_auth_ldap.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports ldapauthserver to register the LDAP implementation of AuthServer.
diff --git a/go/cmd/vtgate/plugin_auth_static.go b/go/cmd/vtgate/cli/plugin_auth_static.go
similarity index 98%
rename from go/cmd/vtgate/plugin_auth_static.go
rename to go/cmd/vtgate/cli/plugin_auth_static.go
index 8e4a552cecf..9ffd60a79f2 100644
--- a/go/cmd/vtgate/plugin_auth_static.go
+++ b/go/cmd/vtgate/cli/plugin_auth_static.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports staticauthserver to register the flat-file implementation of AuthServer.
diff --git a/go/cmd/vtgate/plugin_auth_vault.go b/go/cmd/vtgate/cli/plugin_auth_vault.go
similarity index 98%
rename from go/cmd/vtgate/plugin_auth_vault.go
rename to go/cmd/vtgate/cli/plugin_auth_vault.go
index ca271b496ca..2aee32e3940 100644
--- a/go/cmd/vtgate/plugin_auth_vault.go
+++ b/go/cmd/vtgate/cli/plugin_auth_vault.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports InitAuthServerVault to register the HashiCorp Vault implementation of AuthServer.
diff --git a/go/cmd/topo2topo/plugin_consultopo.go b/go/cmd/vtgate/cli/plugin_consultopo.go
similarity index 98%
rename from go/cmd/topo2topo/plugin_consultopo.go
rename to go/cmd/vtgate/cli/plugin_consultopo.go
index 59d6774fdbc..a128f294a42 100644
--- a/go/cmd/topo2topo/plugin_consultopo.go
+++ b/go/cmd/vtgate/cli/plugin_consultopo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports consultopo to register the consul implementation of TopoServer.
diff --git a/go/cmd/vttablet/plugin_etcd2topo.go b/go/cmd/vtgate/cli/plugin_etcd2topo.go
similarity index 98%
rename from go/cmd/vttablet/plugin_etcd2topo.go
rename to go/cmd/vtgate/cli/plugin_etcd2topo.go
index d99ef51d4af..5a51923cf00 100644
--- a/go/cmd/vttablet/plugin_etcd2topo.go
+++ b/go/cmd/vtgate/cli/plugin_etcd2topo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports etcd2topo to register the etcd2 implementation of TopoServer.
diff --git a/go/cmd/vtgate/plugin_grpctabletconn.go b/go/cmd/vtgate/cli/plugin_grpctabletconn.go
similarity index 98%
rename from go/cmd/vtgate/plugin_grpctabletconn.go
rename to go/cmd/vtgate/cli/plugin_grpctabletconn.go
index 08291a7c916..4a97e36eec4 100644
--- a/go/cmd/vtgate/plugin_grpctabletconn.go
+++ b/go/cmd/vtgate/cli/plugin_grpctabletconn.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the gRPC tabletconn client
diff --git a/go/cmd/vtgate/plugin_grpcvtgateservice.go b/go/cmd/vtgate/cli/plugin_grpcvtgateservice.go
similarity index 98%
rename from go/cmd/vtgate/plugin_grpcvtgateservice.go
rename to go/cmd/vtgate/cli/plugin_grpcvtgateservice.go
index 4ee159710ca..bbbc6e3039e 100644
--- a/go/cmd/vtgate/plugin_grpcvtgateservice.go
+++ b/go/cmd/vtgate/cli/plugin_grpcvtgateservice.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the gRPC vtgateservice server
diff --git a/go/cmd/vtgate/plugin_opentracing.go b/go/cmd/vtgate/cli/plugin_opentracing.go
similarity index 98%
rename from go/cmd/vtgate/plugin_opentracing.go
rename to go/cmd/vtgate/cli/plugin_opentracing.go
index 9a6786d3d64..7ec15423f5a 100644
--- a/go/cmd/vtgate/plugin_opentracing.go
+++ b/go/cmd/vtgate/cli/plugin_opentracing.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
"vitess.io/vitess/go/trace"
diff --git a/go/cmd/vtgate/plugin_opentsdb.go b/go/cmd/vtgate/cli/plugin_opentsdb.go
similarity index 98%
rename from go/cmd/vtgate/plugin_opentsdb.go
rename to go/cmd/vtgate/cli/plugin_opentsdb.go
index 0988f3b9a64..37c81f271c9 100644
--- a/go/cmd/vtgate/plugin_opentsdb.go
+++ b/go/cmd/vtgate/cli/plugin_opentsdb.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports opentsdb to register the opentsdb stats backend.
diff --git a/go/cmd/vtgate/plugin_prometheusbackend.go b/go/cmd/vtgate/cli/plugin_prometheusbackend.go
similarity index 98%
rename from go/cmd/vtgate/plugin_prometheusbackend.go
rename to go/cmd/vtgate/cli/plugin_prometheusbackend.go
index 6bffd133332..a1797abdcd1 100644
--- a/go/cmd/vtgate/plugin_prometheusbackend.go
+++ b/go/cmd/vtgate/cli/plugin_prometheusbackend.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports Prometheus to allow for instrumentation
// with the Prometheus client library
diff --git a/go/cmd/vtgate/cli/plugin_statsd.go b/go/cmd/vtgate/cli/plugin_statsd.go
new file mode 100644
index 00000000000..fc42fa4f447
--- /dev/null
+++ b/go/cmd/vtgate/cli/plugin_statsd.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import "vitess.io/vitess/go/stats/statsd"
+
+func init() {
+ statsd.Init("vtgate")
+}
diff --git a/go/cmd/vtgate/plugin_zk2topo.go b/go/cmd/vtgate/cli/plugin_zk2topo.go
similarity index 98%
rename from go/cmd/vtgate/plugin_zk2topo.go
rename to go/cmd/vtgate/cli/plugin_zk2topo.go
index d75a1c6bcb4..1870a3b2bb3 100644
--- a/go/cmd/vtgate/plugin_zk2topo.go
+++ b/go/cmd/vtgate/cli/plugin_zk2topo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
// Imports and register the zk2 TopologyServer
diff --git a/go/cmd/vtgate/status.go b/go/cmd/vtgate/cli/status.go
similarity index 96%
rename from go/cmd/vtgate/status.go
rename to go/cmd/vtgate/cli/status.go
index 436a1301438..2fdab073d5a 100644
--- a/go/cmd/vtgate/status.go
+++ b/go/cmd/vtgate/cli/status.go
@@ -14,13 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
"vitess.io/vitess/go/vt/discovery"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/srvtopo"
- _ "vitess.io/vitess/go/vt/status"
"vitess.io/vitess/go/vt/vtgate"
)
diff --git a/go/cmd/vtgate/docgen/main.go b/go/cmd/vtgate/docgen/main.go
new file mode 100644
index 00000000000..763d38b7e7b
--- /dev/null
+++ b/go/cmd/vtgate/docgen/main.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/vtgate/cli"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+ Use: "docgen [-d <dir>]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(cli.Main, dir)
+ },
+ }
+
+ // Here because we inadvertently transfer the required "tablet_types_to_wait"
+ // flag during vtgate/cli's init func.
+ pflag.CommandLine = cmd.Flags()
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/vtgate/plugin_kubernetestopo.go b/go/cmd/vtgate/plugin_kubernetestopo.go
deleted file mode 100644
index 671d0c8321f..00000000000
--- a/go/cmd/vtgate/plugin_kubernetestopo.go
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
-Copyright 2020 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-// This plugin imports k8stopo to register the kubernetes implementation of TopoServer.
-
-import (
- _ "vitess.io/vitess/go/vt/topo/k8stopo"
-)
diff --git a/go/cmd/vtgate/plugin_statsd.go b/go/cmd/vtgate/plugin_statsd.go
deleted file mode 100644
index ae2ecb5b2e0..00000000000
--- a/go/cmd/vtgate/plugin_statsd.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package main
-
-import "vitess.io/vitess/go/stats/statsd"
-
-func init() {
- statsd.Init("vtgate")
-}
diff --git a/go/cmd/vtgate/vtgate.go b/go/cmd/vtgate/vtgate.go
index f4fc21000a2..fd81fe85a68 100644
--- a/go/cmd/vtgate/vtgate.go
+++ b/go/cmd/vtgate/vtgate.go
@@ -17,153 +17,12 @@ limitations under the License.
package main
import (
- "context"
- "math/rand"
- "strings"
- "time"
-
- "github.com/spf13/pflag"
-
- "vitess.io/vitess/go/acl"
- "vitess.io/vitess/go/exit"
- "vitess.io/vitess/go/vt/discovery"
+ "vitess.io/vitess/go/cmd/vtgate/cli"
"vitess.io/vitess/go/vt/log"
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
- "vitess.io/vitess/go/vt/proto/vtrpc"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/srvtopo"
- "vitess.io/vitess/go/vt/topo"
- "vitess.io/vitess/go/vt/topo/topoproto"
- "vitess.io/vitess/go/vt/vterrors"
- "vitess.io/vitess/go/vt/vtgate"
- "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext"
)
-var (
- cell = ""
- tabletTypesToWait []topodatapb.TabletType
- plannerName string
-)
-
-func registerFlags(fs *pflag.FlagSet) {
- fs.StringVar(&cell, "cell", cell, "cell to use")
- fs.Var((*topoproto.TabletTypeListFlag)(&tabletTypesToWait), "tablet_types_to_wait", "Wait till connected for specified tablet types during Gateway initialization. Should be provided as a comma-separated set of tablet types.")
- fs.StringVar(&plannerName, "planner-version", plannerName, "Sets the default planner to use when the session has not changed it. Valid values are: V3, V3Insert, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the gen4 planner and falls back to the V3 planner if the gen4 fails.")
-
- acl.RegisterFlags(fs)
-}
-
-var resilientServer *srvtopo.ResilientServer
-
-func init() {
- rand.Seed(time.Now().UnixNano())
- servenv.RegisterDefaultFlags()
- servenv.RegisterFlags()
- servenv.RegisterGRPCServerFlags()
- servenv.RegisterGRPCServerAuthFlags()
- servenv.RegisterServiceMapFlag()
- servenv.OnParse(registerFlags)
-}
-
-// CheckCellFlags will check validation of cell and cells_to_watch flag
-// it will help to avoid strange behaviors when vtgate runs but actually does not work
-func CheckCellFlags(ctx context.Context, serv srvtopo.Server, cell string, cellsToWatch string) error {
- // topo check
- var topoServer *topo.Server
- if serv != nil {
- var err error
- topoServer, err = serv.GetTopoServer()
- if err != nil {
- log.Exitf("Unable to create gateway: %v", err)
- }
- } else {
- log.Exitf("topo server cannot be nil")
- }
- cellsInTopo, err := topoServer.GetKnownCells(ctx)
- if err != nil {
- return err
- }
- if len(cellsInTopo) == 0 {
- return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "topo server should have at least one cell")
- }
-
- // cell valid check
- if cell == "" {
- return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell flag must be set")
- }
- hasCell := false
- for _, v := range cellsInTopo {
- if v == cell {
- hasCell = true
- break
- }
- }
- if !hasCell {
- return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell:[%v] does not exist in topo", cell)
- }
-
- // cells_to_watch valid check
- cells := make([]string, 0, 1)
- for _, c := range strings.Split(cellsToWatch, ",") {
- if c == "" {
- continue
- }
- // cell should contained in cellsInTopo
- if exists := topo.InCellList(c, cellsInTopo); !exists {
- return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell: [%v] is not valid. Available cells: [%v]", c, strings.Join(cellsInTopo, ","))
- }
- cells = append(cells, c)
- }
- if len(cells) == 0 {
- return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cells_to_watch flag cannot be empty")
- }
-
- return nil
-}
-
func main() {
- defer exit.Recover()
-
- servenv.ParseFlags("vtgate")
- servenv.Init()
-
- ts := topo.Open()
- defer ts.Close()
-
- resilientServer = srvtopo.NewResilientServer(ts, "ResilientSrvTopoServer")
-
- tabletTypes := make([]topodatapb.TabletType, 0, 1)
- if len(tabletTypesToWait) != 0 {
- for _, tt := range tabletTypesToWait {
- if topoproto.IsServingType(tt) {
- tabletTypes = append(tabletTypes, tt)
- }
- }
- } else {
- log.Exitf("tablet_types_to_wait flag must be set")
- }
-
- if len(tabletTypes) == 0 {
- log.Exitf("tablet_types_to_wait should contain at least one serving tablet type")
- }
-
- err := CheckCellFlags(context.Background(), resilientServer, cell, vtgate.CellsToWatch)
- if err != nil {
- log.Exitf("cells_to_watch validation failed: %v", err)
+ if err := cli.Main.Execute(); err != nil {
+ log.Exit(err)
}
-
- plannerVersion, _ := plancontext.PlannerNameToVersion(plannerName)
-
- // pass nil for HealthCheck and it will be created
- vtg := vtgate.Init(context.Background(), nil, resilientServer, cell, tabletTypes, plannerVersion)
-
- servenv.OnRun(func() {
- // Flags are parsed now. Parse the template using the actual flag value and overwrite the current template.
- discovery.ParseTabletURLTemplateFromFlag()
- addStatusParts(vtg)
- })
- servenv.OnClose(func() {
- _ = vtg.Gateway().Close(context.Background())
- })
- servenv.RunDefault()
}
diff --git a/go/cmd/vtgateclienttest/cli/main.go b/go/cmd/vtgateclienttest/cli/main.go
new file mode 100644
index 00000000000..a30cebe418d
--- /dev/null
+++ b/go/cmd/vtgateclienttest/cli/main.go
@@ -0,0 +1,64 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package cli is the implementation of vtgateclienttest.
+// This program has a chain of vtgateservice.VTGateService implementations,
+// each one being responsible for one test scenario.
+package cli
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/cmd/vtgateclienttest/services"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/vtgate"
+)
+
+var Main = &cobra.Command{
+ Use: "vtgateclienttest",
+ Short: "vtgateclienttest is a chain of vtgateservice.VTGateService implementations, each one being responsible for one test scenario.",
+ Args: cobra.NoArgs,
+ PreRunE: servenv.CobraPreRunE,
+ RunE: run,
+}
+
+func init() {
+ servenv.RegisterDefaultFlags()
+ servenv.RegisterFlags()
+ servenv.RegisterGRPCServerFlags()
+ servenv.RegisterGRPCServerAuthFlags()
+ servenv.RegisterServiceMapFlag()
+
+ servenv.MoveFlagsToCobraCommand(Main)
+
+ acl.RegisterFlags(Main.Flags())
+}
+
+func run(cmd *cobra.Command, args []string) error {
+ servenv.Init()
+
+ // The implementation chain.
+ servenv.OnRun(func() {
+ s := services.CreateServices()
+ for _, f := range vtgate.RegisterVTGates {
+ f(s)
+ }
+ })
+
+ servenv.RunDefault()
+ return nil
+}
diff --git a/go/cmd/vtgateclienttest/plugin_grpcvtgateservice.go b/go/cmd/vtgateclienttest/cli/plugin_grpcvtgateservice.go
similarity index 98%
rename from go/cmd/vtgateclienttest/plugin_grpcvtgateservice.go
rename to go/cmd/vtgateclienttest/cli/plugin_grpcvtgateservice.go
index 4ee159710ca..bbbc6e3039e 100644
--- a/go/cmd/vtgateclienttest/plugin_grpcvtgateservice.go
+++ b/go/cmd/vtgateclienttest/cli/plugin_grpcvtgateservice.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the gRPC vtgateservice server
diff --git a/go/cmd/vtgateclienttest/docgen/main.go b/go/cmd/vtgateclienttest/docgen/main.go
new file mode 100644
index 00000000000..3a18cd6feeb
--- /dev/null
+++ b/go/cmd/vtgateclienttest/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/vtgateclienttest/cli"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+ Use: "docgen [-d <dir>]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(cli.Main, dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/vtgateclienttest/main.go b/go/cmd/vtgateclienttest/main.go
index 2623ab84893..313b27de04a 100644
--- a/go/cmd/vtgateclienttest/main.go
+++ b/go/cmd/vtgateclienttest/main.go
@@ -14,46 +14,18 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// Package main is the implementation of vtgateclienttest.
-// This program has a chain of vtgateservice.VTGateService implementations,
-// each one being responsible for one test scenario.
package main
import (
- "github.com/spf13/pflag"
-
- "vitess.io/vitess/go/acl"
- "vitess.io/vitess/go/cmd/vtgateclienttest/services"
+ "vitess.io/vitess/go/cmd/vtgateclienttest/cli"
"vitess.io/vitess/go/exit"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/vtgate"
+ "vitess.io/vitess/go/vt/log"
)
-func init() {
- servenv.RegisterDefaultFlags()
- servenv.RegisterFlags()
- servenv.RegisterGRPCServerFlags()
- servenv.RegisterGRPCServerAuthFlags()
- servenv.RegisterServiceMapFlag()
-
- servenv.OnParse(func(fs *pflag.FlagSet) {
- acl.RegisterFlags(fs)
- })
-}
-
func main() {
defer exit.Recover()
- servenv.ParseFlags("vtgateclienttest")
- servenv.Init()
-
- // The implementation chain.
- servenv.OnRun(func() {
- s := services.CreateServices()
- for _, f := range vtgate.RegisterVTGates {
- f(s)
- }
- })
-
- servenv.RunDefault()
+ if err := cli.Main.Execute(); err != nil {
+ log.Exitf("%s", err)
+ }
}
diff --git a/go/cmd/vtgateclienttest/services/callerid.go b/go/cmd/vtgateclienttest/services/callerid.go
index d5334e2272a..db3fa07acf5 100644
--- a/go/cmd/vtgateclienttest/services/callerid.go
+++ b/go/cmd/vtgateclienttest/services/callerid.go
@@ -17,23 +17,21 @@ limitations under the License.
package services
import (
+ "context"
"encoding/json"
"fmt"
"strings"
"vitess.io/vitess/go/mysql"
- "context"
-
"google.golang.org/protobuf/proto"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/callerid"
- "vitess.io/vitess/go/vt/vtgate/vtgateservice"
-
querypb "vitess.io/vitess/go/vt/proto/query"
vtgatepb "vitess.io/vitess/go/vt/proto/vtgate"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
+ "vitess.io/vitess/go/vt/vtgate/vtgateservice"
)
// CallerIDPrefix is the prefix to send with queries so they go
@@ -79,11 +77,11 @@ func (c *callerIDClient) checkCallerID(ctx context.Context, received string) (bo
return true, fmt.Errorf("SUCCESS: callerid matches")
}
-func (c *callerIDClient) Execute(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) {
+func (c *callerIDClient) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) {
if ok, err := c.checkCallerID(ctx, sql); ok {
return session, nil, err
}
- return c.fallbackClient.Execute(ctx, session, sql, bindVariables)
+ return c.fallbackClient.Execute(ctx, mysqlCtx, conn, session, sql, bindVariables)
}
func (c *callerIDClient) ExecuteBatch(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sqlList []string, bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) {
@@ -95,9 +93,9 @@ func (c *callerIDClient) ExecuteBatch(ctx context.Context, conn *mysql.Conn, ses
return c.fallbackClient.ExecuteBatch(ctx, session, sqlList, bindVariablesList)
}
-func (c *callerIDClient) StreamExecute(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) {
+func (c *callerIDClient) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) {
if ok, err := c.checkCallerID(ctx, sql); ok {
return session, err
}
- return c.fallbackClient.StreamExecute(ctx, session, sql, bindVariables, callback)
+ return c.fallbackClient.StreamExecute(ctx, mysqlCtx, nil, session, sql, bindVariables, callback)
}
diff --git a/go/cmd/vtgateclienttest/services/echo.go b/go/cmd/vtgateclienttest/services/echo.go
index 27b77cda3d8..9b9f12c039b 100644
--- a/go/cmd/vtgateclienttest/services/echo.go
+++ b/go/cmd/vtgateclienttest/services/echo.go
@@ -100,7 +100,7 @@ func echoQueryResult(vals map[string]any) *sqltypes.Result {
return qr
}
-func (c *echoClient) Execute(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) {
+func (c *echoClient) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) {
if strings.HasPrefix(sql, EchoPrefix) {
return session, echoQueryResult(map[string]any{
"callerId": callerid.EffectiveCallerIDFromContext(ctx),
@@ -109,10 +109,10 @@ func (c *echoClient) Execute(ctx context.Context, conn *mysql.Conn, session *vtg
"session": session,
}), nil
}
- return c.fallbackClient.Execute(ctx, session, sql, bindVariables)
+ return c.fallbackClient.Execute(ctx, mysqlCtx, conn, session, sql, bindVariables)
}
-func (c *echoClient) StreamExecute(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) {
+func (c *echoClient) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) {
if strings.HasPrefix(sql, EchoPrefix) {
callback(echoQueryResult(map[string]any{
"callerId": callerid.EffectiveCallerIDFromContext(ctx),
@@ -122,7 +122,7 @@ func (c *echoClient) StreamExecute(ctx context.Context, conn *mysql.Conn, sessio
}))
return session, nil
}
- return c.fallbackClient.StreamExecute(ctx, session, sql, bindVariables, callback)
+ return c.fallbackClient.StreamExecute(ctx, mysqlCtx, conn, session, sql, bindVariables, callback)
}
func (c *echoClient) ExecuteBatch(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sqlList []string, bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) {
diff --git a/go/cmd/vtgateclienttest/services/errors.go b/go/cmd/vtgateclienttest/services/errors.go
index d62c87ec776..611acbe6b8e 100644
--- a/go/cmd/vtgateclienttest/services/errors.go
+++ b/go/cmd/vtgateclienttest/services/errors.go
@@ -113,14 +113,14 @@ func trimmedRequestToError(received string) error {
}
}
-func (c *errorClient) Execute(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) {
+func (c *errorClient) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) {
if err := requestToPartialError(sql, session); err != nil {
return session, nil, err
}
if err := requestToError(sql); err != nil {
return session, nil, err
}
- return c.fallbackClient.Execute(ctx, session, sql, bindVariables)
+ return c.fallbackClient.Execute(ctx, mysqlCtx, conn, session, sql, bindVariables)
}
func (c *errorClient) ExecuteBatch(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sqlList []string, bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) {
@@ -135,11 +135,11 @@ func (c *errorClient) ExecuteBatch(ctx context.Context, conn *mysql.Conn, sessio
return c.fallbackClient.ExecuteBatch(ctx, session, sqlList, bindVariablesList)
}
-func (c *errorClient) StreamExecute(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) {
+func (c *errorClient) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) {
if err := requestToError(sql); err != nil {
return session, err
}
- return c.fallbackClient.StreamExecute(ctx, session, sql, bindVariables, callback)
+ return c.fallbackClient.StreamExecute(ctx, mysqlCtx, conn, session, sql, bindVariables, callback)
}
func (c *errorClient) Prepare(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, []*querypb.Field, error) {
diff --git a/go/cmd/vtgateclienttest/services/fallback.go b/go/cmd/vtgateclienttest/services/fallback.go
index 08665e0e2eb..1892c8943f4 100644
--- a/go/cmd/vtgateclienttest/services/fallback.go
+++ b/go/cmd/vtgateclienttest/services/fallback.go
@@ -19,6 +19,8 @@ package services
import (
"context"
+ "vitess.io/vitess/go/mysql"
+
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/vtgate/vtgateservice"
@@ -40,16 +42,16 @@ func newFallbackClient(fallback vtgateservice.VTGateService) fallbackClient {
return fallbackClient{fallback: fallback}
}
-func (c fallbackClient) Execute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) {
- return c.fallback.Execute(ctx, nil, session, sql, bindVariables)
+func (c fallbackClient) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) {
+ return c.fallback.Execute(ctx, mysqlCtx, nil, session, sql, bindVariables)
}
func (c fallbackClient) ExecuteBatch(ctx context.Context, session *vtgatepb.Session, sqlList []string, bindVariablesList []map[string]*querypb.BindVariable) (*vtgatepb.Session, []sqltypes.QueryResponse, error) {
return c.fallback.ExecuteBatch(ctx, nil, session, sqlList, bindVariablesList)
}
-func (c fallbackClient) StreamExecute(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) {
- return c.fallback.StreamExecute(ctx, nil, session, sql, bindVariables, callback)
+func (c fallbackClient) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) {
+ return c.fallback.StreamExecute(ctx, mysqlCtx, nil, session, sql, bindVariables, callback)
}
func (c fallbackClient) Prepare(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, []*querypb.Field, error) {
diff --git a/go/cmd/vtgateclienttest/services/terminal.go b/go/cmd/vtgateclienttest/services/terminal.go
index d3ca1f49902..0ce1375bb27 100644
--- a/go/cmd/vtgateclienttest/services/terminal.go
+++ b/go/cmd/vtgateclienttest/services/terminal.go
@@ -17,12 +17,13 @@ limitations under the License.
package services
import (
+ "context"
"errors"
"fmt"
"vitess.io/vitess/go/mysql"
- "context"
+ "vitess.io/vitess/go/vt/vtgate/vtgateservice"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/tb"
@@ -44,7 +45,7 @@ func newTerminalClient() *terminalClient {
return &terminalClient{}
}
-func (c *terminalClient) Execute(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) {
+func (c *terminalClient) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, *sqltypes.Result, error) {
if sql == "quit://" {
log.Fatal("Received quit:// query. Going down.")
}
@@ -60,7 +61,7 @@ func (c *terminalClient) ExecuteBatch(ctx context.Context, conn *mysql.Conn, ses
return session, nil, errTerminal
}
-func (c *terminalClient) StreamExecute(ctx context.Context, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) {
+func (c *terminalClient) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MySQLConnection, conn *mysql.Conn, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable, callback func(*sqltypes.Result) error) (*vtgatepb.Session, error) {
return session, errTerminal
}
diff --git a/go/cmd/vtgr/main.go b/go/cmd/vtgr/main.go
deleted file mode 100644
index bc403f2aa67..00000000000
--- a/go/cmd/vtgr/main.go
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
-Copyright 2021 The Vitess Authors.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-import (
- "context"
- "fmt"
-
- "github.com/spf13/pflag"
-
- "vitess.io/vitess/go/acl"
- "vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/vtgr"
-)
-
-const deprecationMsg = "vtgr is deprecated and will be removed in Vitess 18. We recommend using VTOrc with semi-sync replication instead."
-
-func main() {
- fmt.Println(deprecationMsg)
-
- var clustersToWatch []string
- servenv.OnParseFor("vtgr", func(fs *pflag.FlagSet) {
- fs.StringSliceVar(&clustersToWatch, "clusters_to_watch", nil, `Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80"`)
-
- acl.RegisterFlags(fs)
- })
- servenv.ParseFlags("vtgr")
-
- log.Warning(deprecationMsg)
-
- // openTabletDiscovery will open up a connection to topo server
- // and populate the tablets in memory
- vtgr := vtgr.OpenTabletDiscovery(context.Background(), nil, clustersToWatch)
- vtgr.RefreshCluster()
- vtgr.ScanAndRepair()
-
- // block here so that we don't exit directly
- select {}
-}
diff --git a/go/cmd/vtorc/cli/cli.go b/go/cmd/vtorc/cli/cli.go
new file mode 100644
index 00000000000..f521ae05e57
--- /dev/null
+++ b/go/cmd/vtorc/cli/cli.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/vtorc/config"
+ "vitess.io/vitess/go/vt/vtorc/inst"
+ "vitess.io/vitess/go/vt/vtorc/logic"
+ "vitess.io/vitess/go/vt/vtorc/server"
+)
+
+var (
+ configFile string
+ Main = &cobra.Command{
+ Use: "vtorc",
+ Short: "VTOrc is the automated fault detection and repair tool in Vitess.",
+ Example: `vtorc \
+ --topo_implementation etcd2 \
+ --topo_global_server_address localhost:2379 \
+ --topo_global_root /vitess/global \
+ --log_dir $VTDATAROOT/tmp \
+ --port 15000 \
+ --recovery-period-block-duration "10m" \
+ --instance-poll-time "1s" \
+ --topo-information-refresh-duration "30s" \
+ --alsologtostderr`,
+ Args: cobra.NoArgs,
+ Version: servenv.AppVersion.String(),
+ PreRunE: servenv.CobraPreRunE,
+ Run: run,
+ }
+)
+
+func run(cmd *cobra.Command, args []string) {
+ servenv.Init()
+ config.UpdateConfigValuesFromFlags()
+ inst.RegisterStats()
+
+ log.Info("starting vtorc")
+ if len(configFile) > 0 {
+ config.ForceRead(configFile)
+ } else {
+ config.Read("/etc/vtorc.conf.json", "conf/vtorc.conf.json", "vtorc.conf.json")
+ }
+ if config.Config.AuditToSyslog {
+ inst.EnableAuditSyslog()
+ }
+ config.MarkConfigurationLoaded()
+
+ // Log final config values to debug if something goes wrong.
+ config.LogConfigValues()
+ server.StartVTOrcDiscovery()
+
+ server.RegisterVTOrcAPIEndpoints()
+ servenv.OnRun(func() {
+ addStatusParts()
+ })
+
+ // For backward compatibility, we require that VTOrc functions even when the --port flag is not provided.
+ // In this case, it should function like before but without the servenv pages.
+ // Therefore, currently we don't check for the --port flag to be necessary, but in release 16+ that check
+ // can be added to always have the servenv page running in VTOrc.
+ servenv.RunDefault()
+}
+
+// addStatusParts adds UI parts to the /debug/status page of VTOrc
+func addStatusParts() {
+ servenv.AddStatusPart("Recent Recoveries", logic.TopologyRecoveriesTemplate, func() any {
+ recoveries, _ := logic.ReadRecentRecoveries(false, 0)
+ return recoveries
+ })
+}
+
+func init() {
+ servenv.RegisterDefaultFlags()
+ servenv.RegisterFlags()
+
+ servenv.MoveFlagsToCobraCommand(Main)
+
+ logic.RegisterFlags(Main.Flags())
+ config.RegisterFlags(Main.Flags())
+ acl.RegisterFlags(Main.Flags())
+ Main.Flags().StringVar(&configFile, "config", "", "config file name")
+}
diff --git a/go/cmd/vtgate/plugin_consultopo.go b/go/cmd/vtorc/cli/plugin_consultopo.go
similarity index 98%
rename from go/cmd/vtgate/plugin_consultopo.go
rename to go/cmd/vtorc/cli/plugin_consultopo.go
index 59d6774fdbc..a128f294a42 100644
--- a/go/cmd/vtgate/plugin_consultopo.go
+++ b/go/cmd/vtorc/cli/plugin_consultopo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports consultopo to register the consul implementation of TopoServer.
diff --git a/go/cmd/vtgate/plugin_etcd2topo.go b/go/cmd/vtorc/cli/plugin_etcd2topo.go
similarity index 98%
rename from go/cmd/vtgate/plugin_etcd2topo.go
rename to go/cmd/vtorc/cli/plugin_etcd2topo.go
index d99ef51d4af..5a51923cf00 100644
--- a/go/cmd/vtgate/plugin_etcd2topo.go
+++ b/go/cmd/vtorc/cli/plugin_etcd2topo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports etcd2topo to register the etcd2 implementation of TopoServer.
diff --git a/go/cmd/vtctld/plugin_grpctmclient.go b/go/cmd/vtorc/cli/plugin_grpctmclient.go
similarity index 98%
rename from go/cmd/vtctld/plugin_grpctmclient.go
rename to go/cmd/vtorc/cli/plugin_grpctmclient.go
index ce554da96df..8cd349c7f87 100644
--- a/go/cmd/vtctld/plugin_grpctmclient.go
+++ b/go/cmd/vtorc/cli/plugin_grpctmclient.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the gRPC tabletmanager client
diff --git a/go/cmd/vtorc/plugin_prometheusbackend.go b/go/cmd/vtorc/cli/plugin_prometheusbackend.go
similarity index 98%
rename from go/cmd/vtorc/plugin_prometheusbackend.go
rename to go/cmd/vtorc/cli/plugin_prometheusbackend.go
index 868e097ade2..8cb6e034d8a 100644
--- a/go/cmd/vtorc/plugin_prometheusbackend.go
+++ b/go/cmd/vtorc/cli/plugin_prometheusbackend.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports Prometheus to allow for instrumentation
// with the Prometheus client library
diff --git a/go/cmd/vtorc/plugin_zk2topo.go b/go/cmd/vtorc/cli/plugin_zk2topo.go
similarity index 98%
rename from go/cmd/vtorc/plugin_zk2topo.go
rename to go/cmd/vtorc/cli/plugin_zk2topo.go
index ebf385ec1af..d71a7e2e196 100644
--- a/go/cmd/vtorc/plugin_zk2topo.go
+++ b/go/cmd/vtorc/cli/plugin_zk2topo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the zk2 TopologyServer
diff --git a/go/cmd/vtorc/docgen/main.go b/go/cmd/vtorc/docgen/main.go
new file mode 100644
index 00000000000..22daccab302
--- /dev/null
+++ b/go/cmd/vtorc/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/vtorc/cli"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+ Use: "docgen [-d <dir>]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(cli.Main, dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/vtorc/main.go b/go/cmd/vtorc/main.go
index c80f0573948..be6dfbb84c6 100644
--- a/go/cmd/vtorc/main.go
+++ b/go/cmd/vtorc/main.go
@@ -17,119 +17,17 @@
package main
import (
- "strings"
-
_ "github.com/go-sql-driver/mysql"
- "github.com/spf13/pflag"
_ "modernc.org/sqlite"
- "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/cmd/vtorc/cli"
"vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/vtorc/config"
- "vitess.io/vitess/go/vt/vtorc/inst"
- "vitess.io/vitess/go/vt/vtorc/logic"
- "vitess.io/vitess/go/vt/vtorc/server"
)
-// transformArgsForPflag turns a slice of raw args passed on the command line,
-// possibly incompatible with pflag (because the user is expecting stdlib flag
-// parsing behavior) and transforms them into the arguments that should have
-// been passed to conform to pflag parsing behavior.
-//
-// the primary function is to catch any cases where the user specified a longopt
-// with only a single hyphen (e.g. `-myflag`) and correct it to be
-// double-hyphenated.
-//
-// note that this transformation does _not_ actually validate the arguments; for
-// example if the user specifies `--myflag`, but the FlagSet has no such flag
-// defined, that will still appear in the returned result and will (correctly)
-// cause a parse error later on in `main`, at which point the CLI usage will
-// be printed.
-//
-// note also that this transformation is incomplete. pflag allows interspersing
-// of flag and positional arguments, whereas stdlib flag does not. however, for
-// vtorc specifically, with the exception of `vtorc help `, the CLI only
-// consumes flag arguments (in other words, there are no supported subcommands),
-// so this is a non-issue, and is not implemented here in order to make this
-// function a bit simpler.
-func transformArgsForPflag(fs *pflag.FlagSet, args []string) (result []string) {
- for i, arg := range args {
- switch {
- case arg == "--":
- // pflag stops parsing at `--`, so we're done transforming the CLI
- // arguments. Just append everything remaining and be done.
- result = append(result, args[i:]...)
- return result
- case strings.HasPrefix(arg, "--"):
- // Long-hand flag. Append it and continue.
- result = append(result, arg)
- case strings.HasPrefix(arg, "-"):
- // Most complex case. This is either:
- // 1. A legacy long-hand flag that needs a double-dash (e.g. `-myflag` => `--myflag`).
- // 2. One _or more_ pflag shortopts all shoved together (think `rm -rf` as `rm -r -f`).
- //
- // In the latter case, we don't need to do any transformations, but
- // in the former, we do.
- name := strings.SplitN(arg[1:], "=", 2)[0] // discard any potential value (`-myflag` and `-myflag=10` both have the name of `myflag`)
- if fs.Lookup(name) != nil || name == "help" {
- // Case 1: We have a long opt with this name, so we need to
- // prepend an additional hyphen.
- result = append(result, "-"+arg)
- } else {
- // Case 2: No transformation needed.
- result = append(result, arg)
- }
- default:
- // Just a flag argument. Nothing to transform.
- result = append(result, arg)
- }
- }
-
- return result
-}
-
// main is the application's entry point. It will spawn an HTTP interface.
func main() {
- servenv.RegisterDefaultFlags()
- servenv.RegisterFlags()
-
- var configFile string
- servenv.OnParseFor("vtorc", func(fs *pflag.FlagSet) {
- logic.RegisterFlags(fs)
- server.RegisterFlags(fs)
- config.RegisterFlags(fs)
- acl.RegisterFlags(fs)
-
- fs.StringVar(&configFile, "config", "", "config file name")
- })
- servenv.ParseFlags("vtorc")
- servenv.Init()
- config.UpdateConfigValuesFromFlags()
-
- log.Info("starting vtorc")
- if len(configFile) > 0 {
- config.ForceRead(configFile)
- } else {
- config.Read("/etc/vtorc.conf.json", "conf/vtorc.conf.json", "vtorc.conf.json")
- }
- if config.Config.AuditToSyslog {
- inst.EnableAuditSyslog()
+ // TODO: viperutil.BindFlags()
+ if err := cli.Main.Execute(); err != nil {
+ log.Exit(err)
}
- config.MarkConfigurationLoaded()
-
- // Log final config values to debug if something goes wrong.
- config.LogConfigValues()
- server.StartVTOrcDiscovery()
-
- server.RegisterVTOrcAPIEndpoints()
- servenv.OnRun(func() {
- addStatusParts()
- })
-
- // For backward compatability, we require that VTOrc functions even when the --port flag is not provided.
- // In this case, it should function like before but without the servenv pages.
- // Therefore, currently we don't check for the --port flag to be necessary, but release 16+ that check
- // can be added to always have the serenv page running in VTOrc.
- servenv.RunDefault()
}
diff --git a/go/cmd/vtorc/main_test.go b/go/cmd/vtorc/main_test.go
deleted file mode 100644
index 5bbdcdaf981..00000000000
--- a/go/cmd/vtorc/main_test.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package main
-
-import (
- "strings"
- "testing"
-
- "github.com/spf13/pflag"
- "github.com/stretchr/testify/assert"
-)
-
-func Test_transformArgsForPflag(t *testing.T) {
- fs := pflag.NewFlagSet("test", pflag.ContinueOnError)
- fs.String("foobar", "baz", "")
- fs.StringP("name", "n", "", "")
- fs.BoolP("debug", "d", true, "")
-
- tests := []struct {
- args []string
- transformed []string
- }{
- {
- args: []string{"--foobar=hello", "--name", "myname", "-d"},
- transformed: []string{"--foobar=hello", "--name", "myname", "-d"},
- },
- {
- args: []string{"-foobar=hello", "-name", "myname", "-d"},
- transformed: []string{"--foobar=hello", "--name", "myname", "-d"},
- },
- {
- args: []string{"--", "-foobar=hello"},
- transformed: []string{"--", "-foobar=hello"},
- },
- {
- args: []string{"-dn"}, // combined shortopts
- transformed: []string{"-dn"},
- },
- }
-
- for _, tt := range tests {
- tt := tt
- name := strings.Join(tt.args, " ")
-
- t.Run(name, func(t *testing.T) {
- got := transformArgsForPflag(fs, tt.args)
- assert.Equal(t, tt.transformed, got)
- })
- }
-}
diff --git a/go/cmd/vtorc/plugin_kubernetestopo.go b/go/cmd/vtorc/plugin_kubernetestopo.go
deleted file mode 100644
index 671d0c8321f..00000000000
--- a/go/cmd/vtorc/plugin_kubernetestopo.go
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
-Copyright 2020 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-// This plugin imports k8stopo to register the kubernetes implementation of TopoServer.
-
-import (
- _ "vitess.io/vitess/go/vt/topo/k8stopo"
-)
diff --git a/go/cmd/vttablet/cli/cli.go b/go/cmd/vttablet/cli/cli.go
new file mode 100644
index 00000000000..1efa35613d7
--- /dev/null
+++ b/go/cmd/vttablet/cli/cli.go
@@ -0,0 +1,276 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/vt/binlog"
+ "vitess.io/vitess/go/vt/dbconfigs"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/mysqlctl"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/tableacl"
+ "vitess.io/vitess/go/vt/tableacl/simpleacl"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/topoproto"
+ "vitess.io/vitess/go/vt/vttablet/onlineddl"
+ "vitess.io/vitess/go/vt/vttablet/tabletmanager"
+ "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff"
+ "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication"
+ "vitess.io/vitess/go/vt/vttablet/tabletserver"
+ "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
+ "vitess.io/vitess/go/yaml2"
+ "vitess.io/vitess/resources"
+
+ topodatapb "vitess.io/vitess/go/vt/proto/topodata"
+)
+
+var (
+ enforceTableACLConfig bool
+ tableACLConfig string
+ tableACLConfigReloadInterval time.Duration
+ tabletPath string
+ tabletConfig string
+
+ tm *tabletmanager.TabletManager
+
+ Main = &cobra.Command{
+ Use: "vttablet",
+ Short: "The VTTablet server controls a running MySQL server.",
+ Long: `The VTTablet server _controls_ a running MySQL server. VTTablet supports two primary types of deployments:
+
+* Managed MySQL (most common)
+* External MySQL
+
+In addition to these deployment types, a partially managed VTTablet is also possible by setting ` + "`--disable_active_reparents`." + `
+
+### Managed MySQL
+
+In this mode, Vitess actively manages MySQL.
+
+### External MySQL.
+
+In this mode, an external MySQL can be used such as AWS RDS, AWS Aurora, Google CloudSQL; or just an existing (vanilla) MySQL installation.
+
+See "Unmanaged Tablet" for the full guide.
+
+Even if a MySQL is external, you can still make vttablet perform some management functions. They are as follows:
+
+` +
+ "* `--disable_active_reparents`: If this flag is set, then any reparent or replica commands will not be allowed. These are InitShardPrimary, PlannedReparentShard, EmergencyReparentShard, and ReparentTablet. In this mode, you should use the TabletExternallyReparented command to inform vitess of the current primary.\n" +
+ "* `--replication_connect_retry`: This value is give to mysql when it connects a replica to the primary as the retry duration parameter.\n" +
+ "* `--enable_replication_reporter`: If this flag is set, then vttablet will transmit replica lag related information to the vtgates, which will allow it to balance load better. Additionally, enabling this will also cause vttablet to restart replication if it was stopped. However, it will do this only if `--disable_active_reparents` was not turned on.\n" +
+ "* `--heartbeat_enable` and `--heartbeat_interval duration`: cause vttablet to write heartbeats to the sidecar database. This information is also used by the replication reporter to assess replica lag.\n",
+ Example: `
+vttablet \
+ --topo_implementation etcd2 \
+ --topo_global_server_address localhost:2379 \
+ --topo_global_root /vitess/ \
+ --tablet-path $alias \
+ --init_keyspace $keyspace \
+ --init_shard $shard \
+ --init_tablet_type $tablet_type \
+ --port $port \
+ --grpc_port $grpc_port \
+ --service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream'` + "\n\n`$alias` needs to be of the form: `<cell>-<id>`, and the cell should match one of the local cells that was created in the topology. The id can be left padded with zeroes: `cell-100` and `cell-000000100` are synonymous.",
+ Args: cobra.NoArgs,
+ Version: servenv.AppVersion.String(),
+ PreRunE: servenv.CobraPreRunE,
+ RunE: run,
+ }
+)
+
+func run(cmd *cobra.Command, args []string) error {
+ servenv.Init()
+
+ tabletAlias, err := topoproto.ParseTabletAlias(tabletPath)
+ if err != nil {
+ return fmt.Errorf("failed to parse --tablet-path: %w", err)
+ }
+
+ // config and mycnf initializations are intertwined.
+ config, mycnf, err := initConfig(tabletAlias)
+ if err != nil {
+ return err
+ }
+
+ ts := topo.Open()
+ qsc, err := createTabletServer(context.Background(), config, ts, tabletAlias)
+ if err != nil {
+ ts.Close()
+ return err
+ }
+
+ mysqld := mysqlctl.NewMysqld(config.DB)
+ servenv.OnClose(mysqld.Close)
+
+ if err := extractOnlineDDL(); err != nil {
+ ts.Close()
+ return fmt.Errorf("failed to extract online DDL binaries: %w", err)
+ }
+
+ // Initialize and start tm.
+ gRPCPort := int32(0)
+ if servenv.GRPCPort() != 0 {
+ gRPCPort = int32(servenv.GRPCPort())
+ }
+ tablet, err := tabletmanager.BuildTabletFromInput(tabletAlias, int32(servenv.Port()), gRPCPort, config.DB)
+ if err != nil {
+ return fmt.Errorf("failed to parse --tablet-path: %w", err)
+ }
+ tm = &tabletmanager.TabletManager{
+ BatchCtx: context.Background(),
+ TopoServer: ts,
+ Cnf: mycnf,
+ MysqlDaemon: mysqld,
+ DBConfigs: config.DB.Clone(),
+ QueryServiceControl: qsc,
+ UpdateStream: binlog.NewUpdateStream(ts, tablet.Keyspace, tabletAlias.Cell, qsc.SchemaEngine()),
+ VREngine: vreplication.NewEngine(config, ts, tabletAlias.Cell, mysqld, qsc.LagThrottler()),
+ VDiffEngine: vdiff.NewEngine(config, ts, tablet),
+ }
+ if err := tm.Start(tablet, config.Healthcheck.IntervalSeconds.Get()); err != nil {
+ ts.Close()
+ return fmt.Errorf("failed to parse --tablet-path or initialize DB credentials: %w", err)
+ }
+ servenv.OnClose(func() {
+ // Close the tm so that our topo entry gets pruned properly and any
+ // background goroutines that use the topo connection are stopped.
+ tm.Close()
+
+ // tm uses ts. So, it should be closed after tm.
+ ts.Close()
+ })
+
+ servenv.RunDefault()
+
+ return nil
+}
+
+func initConfig(tabletAlias *topodatapb.TabletAlias) (*tabletenv.TabletConfig, *mysqlctl.Mycnf, error) {
+ tabletenv.Init()
+ // Load current config after tabletenv.Init, because it changes it.
+ config := tabletenv.NewCurrentConfig()
+ if err := config.Verify(); err != nil {
+ return nil, nil, fmt.Errorf("invalid config: %w", err)
+ }
+
+ if tabletConfig != "" {
+ bytes, err := os.ReadFile(tabletConfig)
+ if err != nil {
+ return nil, nil, fmt.Errorf("error reading config file %s: %w", tabletConfig, err)
+ }
+ if err := yaml2.Unmarshal(bytes, config); err != nil {
+ return nil, nil, fmt.Errorf("error parsing config file %s: %w", bytes, err)
+ }
+ }
+ gotBytes, _ := yaml2.Marshal(config)
+ log.Infof("Loaded config file %s successfully:\n%s", tabletConfig, gotBytes)
+
+ var (
+ mycnf *mysqlctl.Mycnf
+ socketFile string
+ )
+ // If no connection parameters were specified, load the mycnf file
+ // and use the socket from it. If connection parameters were specified,
+ // we assume that the MySQL server is not local, and we skip loading mycnf.
+ // This also means that backup and restore will not be allowed.
+ if !config.DB.HasGlobalSettings() {
+ var err error
+ if mycnf, err = mysqlctl.NewMycnfFromFlags(tabletAlias.Uid); err != nil {
+ return nil, nil, fmt.Errorf("mycnf read failed: %w", err)
+ }
+
+ socketFile = mycnf.SocketFile
+ } else {
+ log.Info("connection parameters were specified. Not loading my.cnf.")
+ }
+
+ // If connection parameters were specified, socketFile will be empty.
+ // Otherwise, the socketFile (read from mycnf) will be used to initialize
+ // dbconfigs.
+ config.DB.InitWithSocket(socketFile)
+ for _, cfg := range config.ExternalConnections {
+ cfg.InitWithSocket("")
+ }
+ return config, mycnf, nil
+}
+
+// extractOnlineDDL extracts the gh-ost binary from this executable. gh-ost is appended
+// to the vttablet executable by `make build` using go:embed
+func extractOnlineDDL() error {
+ if binaryFileName, isOverride := onlineddl.GhostBinaryFileName(); !isOverride {
+ if err := os.WriteFile(binaryFileName, resources.GhostBinary, 0755); err != nil {
+ // One possibility of failure is that gh-ost is up and running. In that case,
+ // let's pause and check if the running gh-ost is the exact same binary as the one we wish to extract.
+ foundBytes, _ := os.ReadFile(binaryFileName)
+ if bytes.Equal(resources.GhostBinary, foundBytes) {
+ // OK, it's the same binary, there is no need to extract the file anyway
+ return nil
+ }
+ return err
+ }
+ }
+
+ return nil
+}
+
+func createTabletServer(ctx context.Context, config *tabletenv.TabletConfig, ts *topo.Server, tabletAlias *topodatapb.TabletAlias) (*tabletserver.TabletServer, error) {
+ if tableACLConfig != "" {
+ // To override default simpleacl, other ACL plugins must set themselves to be default ACL factory
+ tableacl.Register("simpleacl", &simpleacl.Factory{})
+ } else if enforceTableACLConfig {
+ return nil, fmt.Errorf("table acl config has to be specified with table-acl-config flag because enforce-tableacl-config is set.")
+ }
+ // creates and registers the query service
+ qsc := tabletserver.NewTabletServer(ctx, "", config, ts, tabletAlias)
+ servenv.OnRun(func() {
+ qsc.Register()
+ addStatusParts(qsc)
+ })
+ servenv.OnClose(qsc.StopService)
+ qsc.InitACL(tableACLConfig, enforceTableACLConfig, tableACLConfigReloadInterval)
+ return qsc, nil
+}
+
+func init() {
+ servenv.RegisterDefaultFlags()
+ servenv.RegisterFlags()
+ servenv.RegisterGRPCServerFlags()
+ servenv.RegisterGRPCServerAuthFlags()
+ servenv.RegisterServiceMapFlag()
+
+ dbconfigs.RegisterFlags(dbconfigs.All...)
+ mysqlctl.RegisterFlags()
+
+ servenv.MoveFlagsToCobraCommand(Main)
+
+ acl.RegisterFlags(Main.Flags())
+ Main.Flags().BoolVar(&enforceTableACLConfig, "enforce-tableacl-config", enforceTableACLConfig, "if this flag is true, vttablet will fail to start if a valid tableacl config does not exist")
+ Main.Flags().StringVar(&tableACLConfig, "table-acl-config", tableACLConfig, "path to table access checker config file; send SIGHUP to reload this file")
+ Main.Flags().DurationVar(&tableACLConfigReloadInterval, "table-acl-config-reload-interval", tableACLConfigReloadInterval, "Ticker to reload ACLs. Duration flag, format e.g.: 30s. Default: do not reload")
+ Main.Flags().StringVar(&tabletPath, "tablet-path", tabletPath, "tablet alias")
+ Main.Flags().StringVar(&tabletConfig, "tablet_config", tabletConfig, "YAML file config for tablet")
+}
diff --git a/go/cmd/vttablet/plugin_azblobbackupstorage.go b/go/cmd/vttablet/cli/plugin_azblobbackupstorage.go
similarity index 97%
rename from go/cmd/vttablet/plugin_azblobbackupstorage.go
rename to go/cmd/vttablet/cli/plugin_azblobbackupstorage.go
index a4ca64096a9..bdadc894aae 100644
--- a/go/cmd/vttablet/plugin_azblobbackupstorage.go
+++ b/go/cmd/vttablet/cli/plugin_azblobbackupstorage.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/mysqlctl/azblobbackupstorage"
diff --git a/go/cmd/vttablet/plugin_cephbackupstorage.go b/go/cmd/vttablet/cli/plugin_cephbackupstorage.go
similarity index 97%
rename from go/cmd/vttablet/plugin_cephbackupstorage.go
rename to go/cmd/vttablet/cli/plugin_cephbackupstorage.go
index 6cd2d5619d0..171198f5e29 100644
--- a/go/cmd/vttablet/plugin_cephbackupstorage.go
+++ b/go/cmd/vttablet/cli/plugin_cephbackupstorage.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/mysqlctl/cephbackupstorage"
diff --git a/go/cmd/vttablet/plugin_consultopo.go b/go/cmd/vttablet/cli/plugin_consultopo.go
similarity index 98%
rename from go/cmd/vttablet/plugin_consultopo.go
rename to go/cmd/vttablet/cli/plugin_consultopo.go
index 59d6774fdbc..a128f294a42 100644
--- a/go/cmd/vttablet/plugin_consultopo.go
+++ b/go/cmd/vttablet/cli/plugin_consultopo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports consultopo to register the consul implementation of TopoServer.
diff --git a/go/cmd/topo2topo/plugin_etcd2topo.go b/go/cmd/vttablet/cli/plugin_etcd2topo.go
similarity index 98%
rename from go/cmd/topo2topo/plugin_etcd2topo.go
rename to go/cmd/vttablet/cli/plugin_etcd2topo.go
index d99ef51d4af..5a51923cf00 100644
--- a/go/cmd/topo2topo/plugin_etcd2topo.go
+++ b/go/cmd/vttablet/cli/plugin_etcd2topo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports etcd2topo to register the etcd2 implementation of TopoServer.
diff --git a/go/cmd/vtctld/plugin_filebackupstorage.go b/go/cmd/vttablet/cli/plugin_filebackupstorage.go
similarity index 97%
rename from go/cmd/vtctld/plugin_filebackupstorage.go
rename to go/cmd/vttablet/cli/plugin_filebackupstorage.go
index cf2ceb5150f..9edc82d6a1b 100644
--- a/go/cmd/vtctld/plugin_filebackupstorage.go
+++ b/go/cmd/vttablet/cli/plugin_filebackupstorage.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage"
diff --git a/go/cmd/vttablet/plugin_filecustomrule.go b/go/cmd/vttablet/cli/plugin_filecustomrule.go
similarity index 98%
rename from go/cmd/vttablet/plugin_filecustomrule.go
rename to go/cmd/vttablet/cli/plugin_filecustomrule.go
index 854c484d3c1..1bf3c4297d5 100644
--- a/go/cmd/vttablet/plugin_filecustomrule.go
+++ b/go/cmd/vttablet/cli/plugin_filecustomrule.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the file custom rule source
diff --git a/go/cmd/vttablet/plugin_filelogger.go b/go/cmd/vttablet/cli/plugin_filelogger.go
similarity index 98%
rename from go/cmd/vttablet/plugin_filelogger.go
rename to go/cmd/vttablet/cli/plugin_filelogger.go
index bc5d968d2f7..fd5104f69a8 100644
--- a/go/cmd/vttablet/plugin_filelogger.go
+++ b/go/cmd/vttablet/cli/plugin_filelogger.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the file-based query logger
diff --git a/go/cmd/vttablet/plugin_gcsbackupstorage.go b/go/cmd/vttablet/cli/plugin_gcsbackupstorage.go
similarity index 97%
rename from go/cmd/vttablet/plugin_gcsbackupstorage.go
rename to go/cmd/vttablet/cli/plugin_gcsbackupstorage.go
index 82a22cef1da..655583c8ca2 100644
--- a/go/cmd/vttablet/plugin_gcsbackupstorage.go
+++ b/go/cmd/vttablet/cli/plugin_gcsbackupstorage.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/mysqlctl/gcsbackupstorage"
diff --git a/go/cmd/vttablet/plugin_grpcbinlogplayer.go b/go/cmd/vttablet/cli/plugin_grpcbinlogplayer.go
similarity index 98%
rename from go/cmd/vttablet/plugin_grpcbinlogplayer.go
rename to go/cmd/vttablet/cli/plugin_grpcbinlogplayer.go
index f8b2380c7c7..31920b97fae 100644
--- a/go/cmd/vttablet/plugin_grpcbinlogplayer.go
+++ b/go/cmd/vttablet/cli/plugin_grpcbinlogplayer.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the gRPC binlog player
diff --git a/go/cmd/vttablet/plugin_grpcbinlogstreamer.go b/go/cmd/vttablet/cli/plugin_grpcbinlogstreamer.go
similarity index 98%
rename from go/cmd/vttablet/plugin_grpcbinlogstreamer.go
rename to go/cmd/vttablet/cli/plugin_grpcbinlogstreamer.go
index 26683ea7ccf..716dd499785 100644
--- a/go/cmd/vttablet/plugin_grpcbinlogstreamer.go
+++ b/go/cmd/vttablet/cli/plugin_grpcbinlogstreamer.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the gRPC binlog streamer
diff --git a/go/cmd/vttablet/plugin_grpcqueryservice.go b/go/cmd/vttablet/cli/plugin_grpcqueryservice.go
similarity index 98%
rename from go/cmd/vttablet/plugin_grpcqueryservice.go
rename to go/cmd/vttablet/cli/plugin_grpcqueryservice.go
index 073c2009151..a46701d16aa 100644
--- a/go/cmd/vttablet/plugin_grpcqueryservice.go
+++ b/go/cmd/vttablet/cli/plugin_grpcqueryservice.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the gRPC queryservice server
diff --git a/go/cmd/vttablet/plugin_grpctabletconn.go b/go/cmd/vttablet/cli/plugin_grpctabletconn.go
similarity index 98%
rename from go/cmd/vttablet/plugin_grpctabletconn.go
rename to go/cmd/vttablet/cli/plugin_grpctabletconn.go
index 08291a7c916..4a97e36eec4 100644
--- a/go/cmd/vttablet/plugin_grpctabletconn.go
+++ b/go/cmd/vttablet/cli/plugin_grpctabletconn.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the gRPC tabletconn client
diff --git a/go/cmd/vttablet/plugin_grpcthrottlerserver.go b/go/cmd/vttablet/cli/plugin_grpcthrottlerserver.go
similarity index 98%
rename from go/cmd/vttablet/plugin_grpcthrottlerserver.go
rename to go/cmd/vttablet/cli/plugin_grpcthrottlerserver.go
index 40cce4bd51c..f25fdb73df3 100644
--- a/go/cmd/vttablet/plugin_grpcthrottlerserver.go
+++ b/go/cmd/vttablet/cli/plugin_grpcthrottlerserver.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the gRPC throttler server.
diff --git a/go/cmd/vttablet/plugin_grpctmclient.go b/go/cmd/vttablet/cli/plugin_grpctmclient.go
similarity index 98%
rename from go/cmd/vttablet/plugin_grpctmclient.go
rename to go/cmd/vttablet/cli/plugin_grpctmclient.go
index ce554da96df..8cd349c7f87 100644
--- a/go/cmd/vttablet/plugin_grpctmclient.go
+++ b/go/cmd/vttablet/cli/plugin_grpctmclient.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the gRPC tabletmanager client
diff --git a/go/cmd/vttablet/plugin_grpctmserver.go b/go/cmd/vttablet/cli/plugin_grpctmserver.go
similarity index 98%
rename from go/cmd/vttablet/plugin_grpctmserver.go
rename to go/cmd/vttablet/cli/plugin_grpctmserver.go
index 094d273fe39..6dee0146c21 100644
--- a/go/cmd/vttablet/plugin_grpctmserver.go
+++ b/go/cmd/vttablet/cli/plugin_grpctmserver.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the gRPC tabletmanager server
diff --git a/go/cmd/vttablet/plugin_opentracing.go b/go/cmd/vttablet/cli/plugin_opentracing.go
similarity index 98%
rename from go/cmd/vttablet/plugin_opentracing.go
rename to go/cmd/vttablet/cli/plugin_opentracing.go
index 942bb25c895..f836daf4036 100644
--- a/go/cmd/vttablet/plugin_opentracing.go
+++ b/go/cmd/vttablet/cli/plugin_opentracing.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
"vitess.io/vitess/go/trace"
diff --git a/go/cmd/vttablet/plugin_opentsdb.go b/go/cmd/vttablet/cli/plugin_opentsdb.go
similarity index 98%
rename from go/cmd/vttablet/plugin_opentsdb.go
rename to go/cmd/vttablet/cli/plugin_opentsdb.go
index 494dbbee20d..328628c2a3d 100644
--- a/go/cmd/vttablet/plugin_opentsdb.go
+++ b/go/cmd/vttablet/cli/plugin_opentsdb.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports opentsdb to register the opentsdb stats backend.
diff --git a/go/cmd/vttablet/plugin_prometheusbackend.go b/go/cmd/vttablet/cli/plugin_prometheusbackend.go
similarity index 98%
rename from go/cmd/vttablet/plugin_prometheusbackend.go
rename to go/cmd/vttablet/cli/plugin_prometheusbackend.go
index 4066b5ba6ec..a169c6d9777 100644
--- a/go/cmd/vttablet/plugin_prometheusbackend.go
+++ b/go/cmd/vttablet/cli/plugin_prometheusbackend.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// This plugin imports Prometheus to allow for instrumentation
// with the Prometheus client library
diff --git a/go/cmd/vttablet/plugin_s3backupstorage.go b/go/cmd/vttablet/cli/plugin_s3backupstorage.go
similarity index 97%
rename from go/cmd/vttablet/plugin_s3backupstorage.go
rename to go/cmd/vttablet/cli/plugin_s3backupstorage.go
index a5b5c671ebb..4b3ecb33edb 100644
--- a/go/cmd/vttablet/plugin_s3backupstorage.go
+++ b/go/cmd/vttablet/cli/plugin_s3backupstorage.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
_ "vitess.io/vitess/go/vt/mysqlctl/s3backupstorage"
diff --git a/go/cmd/vttablet/cli/plugin_statsd.go b/go/cmd/vttablet/cli/plugin_statsd.go
new file mode 100644
index 00000000000..189e0367eb0
--- /dev/null
+++ b/go/cmd/vttablet/cli/plugin_statsd.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package cli
+
+import "vitess.io/vitess/go/stats/statsd"
+
+func init() {
+ statsd.Init("vttablet")
+}
diff --git a/go/cmd/vttablet/plugin_sysloglogger.go b/go/cmd/vttablet/cli/plugin_sysloglogger.go
similarity index 98%
rename from go/cmd/vttablet/plugin_sysloglogger.go
rename to go/cmd/vttablet/cli/plugin_sysloglogger.go
index 4c57ad006c3..a7260d6f8cc 100644
--- a/go/cmd/vttablet/plugin_sysloglogger.go
+++ b/go/cmd/vttablet/cli/plugin_sysloglogger.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the syslog-based query logger
diff --git a/go/cmd/vttablet/plugin_topocustomrule.go b/go/cmd/vttablet/cli/plugin_topocustomrule.go
similarity index 98%
rename from go/cmd/vttablet/plugin_topocustomrule.go
rename to go/cmd/vttablet/cli/plugin_topocustomrule.go
index cef81458155..9fce319558e 100644
--- a/go/cmd/vttablet/plugin_topocustomrule.go
+++ b/go/cmd/vttablet/cli/plugin_topocustomrule.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the topo custom rule source
diff --git a/go/cmd/vttablet/plugin_zk2topo.go b/go/cmd/vttablet/cli/plugin_zk2topo.go
similarity index 98%
rename from go/cmd/vttablet/plugin_zk2topo.go
rename to go/cmd/vttablet/cli/plugin_zk2topo.go
index ebf385ec1af..d71a7e2e196 100644
--- a/go/cmd/vttablet/plugin_zk2topo.go
+++ b/go/cmd/vttablet/cli/plugin_zk2topo.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
// Imports and register the zk2 TopologyServer
diff --git a/go/cmd/vttablet/status.go b/go/cmd/vttablet/cli/status.go
similarity index 97%
rename from go/cmd/vttablet/status.go
rename to go/cmd/vttablet/cli/status.go
index ff3b65134c7..762a9fa646e 100644
--- a/go/cmd/vttablet/status.go
+++ b/go/cmd/vttablet/cli/status.go
@@ -1,5 +1,5 @@
/*
-Copyright 2019 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,11 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
"vitess.io/vitess/go/vt/servenv"
- _ "vitess.io/vitess/go/vt/status"
"vitess.io/vitess/go/vt/topo"
"vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication"
"vitess.io/vitess/go/vt/vttablet/tabletserver"
diff --git a/go/cmd/vttablet/docgen/main.go b/go/cmd/vttablet/docgen/main.go
new file mode 100644
index 00000000000..9915d641352
--- /dev/null
+++ b/go/cmd/vttablet/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/vttablet/cli"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+ Use: "docgen [-d ]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(cli.Main, dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/vttablet/plugin_kubernetestopo.go b/go/cmd/vttablet/plugin_kubernetestopo.go
deleted file mode 100644
index 671d0c8321f..00000000000
--- a/go/cmd/vttablet/plugin_kubernetestopo.go
+++ /dev/null
@@ -1,23 +0,0 @@
-/*
-Copyright 2020 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package main
-
-// This plugin imports k8stopo to register the kubernetes implementation of TopoServer.
-
-import (
- _ "vitess.io/vitess/go/vt/topo/k8stopo"
-)
diff --git a/go/cmd/vttablet/plugin_statsd.go b/go/cmd/vttablet/plugin_statsd.go
deleted file mode 100644
index 51761e6c406..00000000000
--- a/go/cmd/vttablet/plugin_statsd.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package main
-
-import "vitess.io/vitess/go/stats/statsd"
-
-func init() {
- statsd.Init("vttablet")
-}
diff --git a/go/cmd/vttablet/vttablet.go b/go/cmd/vttablet/vttablet.go
index f879d2728e1..0f91f48b649 100644
--- a/go/cmd/vttablet/vttablet.go
+++ b/go/cmd/vttablet/vttablet.go
@@ -18,206 +18,12 @@ limitations under the License.
package main
import (
- "bytes"
- "context"
- "os"
- "time"
-
- "github.com/spf13/pflag"
-
- "vitess.io/vitess/go/acl"
- "vitess.io/vitess/go/vt/binlog"
- "vitess.io/vitess/go/vt/dbconfigs"
+ "vitess.io/vitess/go/cmd/vttablet/cli"
"vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/mysqlctl"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/tableacl"
- "vitess.io/vitess/go/vt/tableacl/simpleacl"
- "vitess.io/vitess/go/vt/topo"
- "vitess.io/vitess/go/vt/topo/topoproto"
- "vitess.io/vitess/go/vt/vttablet/onlineddl"
- "vitess.io/vitess/go/vt/vttablet/tabletmanager"
- "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff"
- "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication"
- "vitess.io/vitess/go/vt/vttablet/tabletserver"
- "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv"
- "vitess.io/vitess/go/yaml2"
- "vitess.io/vitess/resources"
-
- topodatapb "vitess.io/vitess/go/vt/proto/topodata"
-)
-
-var (
- enforceTableACLConfig bool
- tableACLConfig string
- tableACLConfigReloadInterval time.Duration
- tabletPath string
- tabletConfig string
-
- tm *tabletmanager.TabletManager
)
-func registerFlags(fs *pflag.FlagSet) {
- fs.BoolVar(&enforceTableACLConfig, "enforce-tableacl-config", enforceTableACLConfig, "if this flag is true, vttablet will fail to start if a valid tableacl config does not exist")
- fs.StringVar(&tableACLConfig, "table-acl-config", tableACLConfig, "path to table access checker config file; send SIGHUP to reload this file")
- fs.DurationVar(&tableACLConfigReloadInterval, "table-acl-config-reload-interval", tableACLConfigReloadInterval, "Ticker to reload ACLs. Duration flag, format e.g.: 30s. Default: do not reload")
- fs.StringVar(&tabletPath, "tablet-path", tabletPath, "tablet alias")
- fs.StringVar(&tabletConfig, "tablet_config", tabletConfig, "YAML file config for tablet")
-
- acl.RegisterFlags(fs)
-}
-
-func init() {
- servenv.RegisterDefaultFlags()
- servenv.RegisterFlags()
- servenv.RegisterGRPCServerFlags()
- servenv.RegisterGRPCServerAuthFlags()
- servenv.RegisterServiceMapFlag()
- servenv.OnParseFor("vttablet", registerFlags)
-}
-
func main() {
- dbconfigs.RegisterFlags(dbconfigs.All...)
- mysqlctl.RegisterFlags()
-
- servenv.ParseFlags("vttablet")
- servenv.Init()
-
- if tabletPath == "" {
- log.Exit("--tablet-path required")
- }
- tabletAlias, err := topoproto.ParseTabletAlias(tabletPath)
- if err != nil {
- log.Exitf("failed to parse --tablet-path: %v", err)
- }
-
- // config and mycnf initializations are intertwined.
- config, mycnf := initConfig(tabletAlias)
-
- ts := topo.Open()
- qsc := createTabletServer(config, ts, tabletAlias)
-
- mysqld := mysqlctl.NewMysqld(config.DB)
- servenv.OnClose(mysqld.Close)
-
- if err := extractOnlineDDL(); err != nil {
- log.Exitf("failed to extract online DDL binaries: %v", err)
- }
-
- // Initialize and start tm.
- gRPCPort := int32(0)
- if servenv.GRPCPort() != 0 {
- gRPCPort = int32(servenv.GRPCPort())
- }
- tablet, err := tabletmanager.BuildTabletFromInput(tabletAlias, int32(servenv.Port()), gRPCPort, config.DB)
- if err != nil {
- log.Exitf("failed to parse --tablet-path: %v", err)
- }
- tm = &tabletmanager.TabletManager{
- BatchCtx: context.Background(),
- TopoServer: ts,
- Cnf: mycnf,
- MysqlDaemon: mysqld,
- DBConfigs: config.DB.Clone(),
- QueryServiceControl: qsc,
- UpdateStream: binlog.NewUpdateStream(ts, tablet.Keyspace, tabletAlias.Cell, qsc.SchemaEngine()),
- VREngine: vreplication.NewEngine(config, ts, tabletAlias.Cell, mysqld, qsc.LagThrottler()),
- VDiffEngine: vdiff.NewEngine(config, ts, tablet),
- }
- if err := tm.Start(tablet, config.Healthcheck.IntervalSeconds.Get()); err != nil {
- log.Exitf("failed to parse --tablet-path or initialize DB credentials: %v", err)
- }
- servenv.OnClose(func() {
- // Close the tm so that our topo entry gets pruned properly and any
- // background goroutines that use the topo connection are stopped.
- tm.Close()
-
- // tm uses ts. So, it should be closed after tm.
- ts.Close()
- })
-
- servenv.RunDefault()
-}
-
-func initConfig(tabletAlias *topodatapb.TabletAlias) (*tabletenv.TabletConfig, *mysqlctl.Mycnf) {
- tabletenv.Init()
- // Load current config after tabletenv.Init, because it changes it.
- config := tabletenv.NewCurrentConfig()
- if err := config.Verify(); err != nil {
- log.Exitf("invalid config: %v", err)
- }
-
- if tabletConfig != "" {
- bytes, err := os.ReadFile(tabletConfig)
- if err != nil {
- log.Exitf("error reading config file %s: %v", tabletConfig, err)
- }
- if err := yaml2.Unmarshal(bytes, config); err != nil {
- log.Exitf("error parsing config file %s: %v", bytes, err)
- }
- }
- gotBytes, _ := yaml2.Marshal(config)
- log.Infof("Loaded config file %s successfully:\n%s", tabletConfig, gotBytes)
-
- var mycnf *mysqlctl.Mycnf
- var socketFile string
- // If no connection parameters were specified, load the mycnf file
- // and use the socket from it. If connection parameters were specified,
- // we assume that the mysql is not local, and we skip loading mycnf.
- // This also means that backup and restore will not be allowed.
- if !config.DB.HasGlobalSettings() {
- var err error
- if mycnf, err = mysqlctl.NewMycnfFromFlags(tabletAlias.Uid); err != nil {
- log.Exitf("mycnf read failed: %v", err)
- }
- socketFile = mycnf.SocketFile
- } else {
- log.Info("connection parameters were specified. Not loading my.cnf.")
- }
-
- // If connection parameters were specified, socketFile will be empty.
- // Otherwise, the socketFile (read from mycnf) will be used to initialize
- // dbconfigs.
- config.DB.InitWithSocket(socketFile)
- for _, cfg := range config.ExternalConnections {
- cfg.InitWithSocket("")
- }
- return config, mycnf
-}
-
-// extractOnlineDDL extracts the gh-ost binary from this executable. gh-ost is appended
-// to vttablet executable by `make build` with a go:embed
-func extractOnlineDDL() error {
- if binaryFileName, isOverride := onlineddl.GhostBinaryFileName(); !isOverride {
- if err := os.WriteFile(binaryFileName, resources.GhostBinary, 0755); err != nil {
- // One possibility of failure is that gh-ost is up and running. In that case,
- // let's pause and check if the running gh-ost is exact same binary as the one we wish to extract.
- foundBytes, _ := os.ReadFile(binaryFileName)
- if bytes.Equal(resources.GhostBinary, foundBytes) {
- // OK, it's the same binary, there is no need to extract the file anyway
- return nil
- }
- return err
- }
- }
-
- return nil
-}
-
-func createTabletServer(config *tabletenv.TabletConfig, ts *topo.Server, tabletAlias *topodatapb.TabletAlias) *tabletserver.TabletServer {
- if tableACLConfig != "" {
- // To override default simpleacl, other ACL plugins must set themselves to be default ACL factory
- tableacl.Register("simpleacl", &simpleacl.Factory{})
- } else if enforceTableACLConfig {
- log.Exit("table acl config has to be specified with table-acl-config flag because enforce-tableacl-config is set.")
+ if err := cli.Main.Execute(); err != nil {
+ log.Exit(err)
}
- // creates and registers the query service
- qsc := tabletserver.NewTabletServer("", config, ts, tabletAlias)
- servenv.OnRun(func() {
- qsc.Register()
- addStatusParts(qsc)
- })
- servenv.OnClose(qsc.StopService)
- qsc.InitACL(tableACLConfig, enforceTableACLConfig, tableACLConfigReloadInterval)
- return qsc
}
diff --git a/go/cmd/vttestserver/data/schema/app_customer/v001__create_customer_table.sql b/go/cmd/vttestserver/cli/data/schema/app_customer/v001__create_customer_table.sql
similarity index 100%
rename from go/cmd/vttestserver/data/schema/app_customer/v001__create_customer_table.sql
rename to go/cmd/vttestserver/cli/data/schema/app_customer/v001__create_customer_table.sql
diff --git a/go/cmd/vttestserver/data/schema/app_customer/v002__add_customer_vschema.sql b/go/cmd/vttestserver/cli/data/schema/app_customer/v002__add_customer_vschema.sql
similarity index 100%
rename from go/cmd/vttestserver/data/schema/app_customer/v002__add_customer_vschema.sql
rename to go/cmd/vttestserver/cli/data/schema/app_customer/v002__add_customer_vschema.sql
diff --git a/go/cmd/vttestserver/data/schema/app_customer/vschema.json b/go/cmd/vttestserver/cli/data/schema/app_customer/vschema.json
similarity index 100%
rename from go/cmd/vttestserver/data/schema/app_customer/vschema.json
rename to go/cmd/vttestserver/cli/data/schema/app_customer/vschema.json
diff --git a/go/cmd/vttestserver/data/schema/test_keyspace/v001__create_test_table.sql b/go/cmd/vttestserver/cli/data/schema/test_keyspace/v001__create_test_table.sql
similarity index 100%
rename from go/cmd/vttestserver/data/schema/test_keyspace/v001__create_test_table.sql
rename to go/cmd/vttestserver/cli/data/schema/test_keyspace/v001__create_test_table.sql
diff --git a/go/cmd/vttestserver/data/schema/test_keyspace/v002__create_hash_vindex.sql b/go/cmd/vttestserver/cli/data/schema/test_keyspace/v002__create_hash_vindex.sql
similarity index 100%
rename from go/cmd/vttestserver/data/schema/test_keyspace/v002__create_hash_vindex.sql
rename to go/cmd/vttestserver/cli/data/schema/test_keyspace/v002__create_hash_vindex.sql
diff --git a/go/cmd/vttestserver/data/schema/test_keyspace/v003__add_table_vschema.sql b/go/cmd/vttestserver/cli/data/schema/test_keyspace/v003__add_table_vschema.sql
similarity index 100%
rename from go/cmd/vttestserver/data/schema/test_keyspace/v003__add_table_vschema.sql
rename to go/cmd/vttestserver/cli/data/schema/test_keyspace/v003__add_table_vschema.sql
diff --git a/go/cmd/vttestserver/data/schema/test_keyspace/v004__create_test_table1.sql b/go/cmd/vttestserver/cli/data/schema/test_keyspace/v004__create_test_table1.sql
similarity index 100%
rename from go/cmd/vttestserver/data/schema/test_keyspace/v004__create_test_table1.sql
rename to go/cmd/vttestserver/cli/data/schema/test_keyspace/v004__create_test_table1.sql
diff --git a/go/cmd/vttestserver/cli/main.go b/go/cmd/vttestserver/cli/main.go
new file mode 100644
index 00000000000..f9a2f16cd87
--- /dev/null
+++ b/go/cmd/vttestserver/cli/main.go
@@ -0,0 +1,308 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// vttestserver allows users to spawn a self-contained Vitess server for local testing/CI.
+package cli
+
+import (
+ "encoding/json"
+ "fmt"
+ "os"
+ "os/signal"
+ "strconv"
+ "strings"
+ "syscall"
+ "time"
+
+ "github.com/spf13/cobra"
+ "google.golang.org/protobuf/encoding/prototext"
+
+ "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/vttest"
+
+ vttestpb "vitess.io/vitess/go/vt/proto/vttest"
+)
+
+type topoFlags struct {
+ cells []string
+ keyspaces []string
+ shards []string
+ replicas int
+ rdonly int
+}
+
+var (
+ basePort int
+ config vttest.Config
+ doSeed bool
+ mycnf string
+ protoTopo string
+ seed vttest.SeedConfig
+ topo topoFlags
+)
+
+func (t *topoFlags) buildTopology() (*vttestpb.VTTestTopology, error) {
+ topo := &vttestpb.VTTestTopology{}
+ topo.Cells = t.cells
+
+ keyspaces := t.keyspaces
+ shardCounts := t.shards
+ if len(keyspaces) != len(shardCounts) {
+ return nil, fmt.Errorf("--keyspaces must be same length as --shards")
+ }
+
+ for i := range keyspaces {
+ name := keyspaces[i]
+ numshards, err := strconv.ParseInt(shardCounts[i], 10, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ ks := &vttestpb.Keyspace{
+ Name: name,
+ ReplicaCount: int32(t.replicas),
+ RdonlyCount: int32(t.rdonly),
+ }
+
+ for _, shardname := range vttest.GetShardNames(int(numshards)) {
+ ks.Shards = append(ks.Shards, &vttestpb.Shard{
+ Name: shardname,
+ })
+ }
+
+ topo.Keyspaces = append(topo.Keyspaces, ks)
+ }
+
+ return topo, nil
+}
+
+func init() {
+ servenv.RegisterFlags()
+ servenv.RegisterGRPCServerFlags()
+ servenv.RegisterGRPCServerAuthFlags()
+ servenv.RegisterServiceMapFlag()
+}
+
+func New() (cmd *cobra.Command) {
+ cmd = &cobra.Command{
+ Use: "vttestserver",
+ Short: "vttestserver allows users to spawn a self-contained Vitess server for local testing/CI.",
+ Args: cobra.NoArgs,
+ PreRunE: servenv.CobraPreRunE,
+ RunE: run,
+ }
+
+ servenv.MoveFlagsToCobraCommand(cmd)
+
+ cmd.Flags().IntVar(&basePort, "port", 0,
+ "Port to use for vtcombo. If this is 0, a random port will be chosen.")
+
+ cmd.Flags().StringVar(&protoTopo, "proto_topo", "",
+ "Define the fake cluster topology as a compact text format encoded"+
+ " vttest proto. See vttest.proto for more information.")
+
+ cmd.Flags().StringVar(&config.SchemaDir, "schema_dir", "",
+ "Directory for initial schema files. Within this dir,"+
+ " there should be a subdir for each keyspace. Within"+
+ " each keyspace dir, each file is executed as SQL"+
+ " after the database is created on each shard."+
+ " If the directory contains a vschema.json file, it"+
+ " will be used as the vschema for the V3 API.")
+
+ cmd.Flags().StringVar(&config.DefaultSchemaDir, "default_schema_dir", "",
+ "Default directory for initial schema files. If no schema is found"+
+ " in schema_dir, default to this location.")
+
+ cmd.Flags().StringVar(&config.DataDir, "data_dir", "",
+ "Directory where the data files will be placed, defaults to a random "+
+ "directory under /vt/vtdataroot")
+
+ cmd.Flags().BoolVar(&config.OnlyMySQL, "mysql_only", false,
+ "If this flag is set only mysql is initialized."+
+ " The rest of the vitess components are not started."+
+ " Also, the output specifies the mysql unix socket"+
+ " instead of the vtgate port.")
+
+ cmd.Flags().BoolVar(&config.PersistentMode, "persistent_mode", false,
+ "If this flag is set, the MySQL data directory is not cleaned up"+
+ " when LocalCluster.TearDown() is called. This is useful for running"+
+ " vttestserver as a database container in local developer environments. Note"+
+ " that db migration files (--schema_dir option) and seeding of"+
+ " random data (--initialize_with_random_data option) will only run during"+
+ " cluster startup if the data directory does not already exist. "+
+ " Changes to VSchema are persisted across cluster restarts using a simple"+
+ " watcher if the --data_dir argument is specified.")
+
+ cmd.Flags().BoolVar(&doSeed, "initialize_with_random_data", false,
+ "If this flag is each table-shard will be initialized"+
+ " with random data. See also the 'rng_seed' and 'min_shard_size'"+
+ " and 'max_shard_size' flags.")
+
+ cmd.Flags().IntVar(&seed.RngSeed, "rng_seed", 123,
+ "The random number generator seed to use when initializing"+
+ " with random data (see also --initialize_with_random_data)."+
+ " Multiple runs with the same seed will result with the same"+
+ " initial data.")
+
+ cmd.Flags().IntVar(&seed.MinSize, "min_table_shard_size", 1000,
+ "The minimum number of initial rows in a table shard. Ignored if"+
+ "--initialize_with_random_data is false. The actual number is chosen"+
+ " randomly.")
+
+ cmd.Flags().IntVar(&seed.MaxSize, "max_table_shard_size", 10000,
+ "The maximum number of initial rows in a table shard. Ignored if"+
+ "--initialize_with_random_data is false. The actual number is chosen"+
+ " randomly")
+
+ cmd.Flags().Float64Var(&seed.NullProbability, "null_probability", 0.1,
+ "The probability to initialize a field with 'NULL' "+
+ " if --initialize_with_random_data is true. Only applies to fields"+
+ " that can contain NULL values.")
+
+ cmd.Flags().StringVar(&config.MySQLBindHost, "mysql_bind_host", "localhost",
+ "which host to bind vtgate mysql listener to")
+
+ cmd.Flags().StringVar(&mycnf, "extra_my_cnf", "",
+ "extra files to add to the config, separated by ':'")
+
+ cmd.Flags().StringSliceVar(&topo.cells, "cells", []string{"test"}, "Comma separated list of cells")
+ cmd.Flags().StringSliceVar(&topo.keyspaces, "keyspaces", []string{"test_keyspace"},
+ "Comma separated list of keyspaces")
+ cmd.Flags().StringSliceVar(&topo.shards, "num_shards", []string{"2"},
+ "Comma separated shard count (one per keyspace)")
+ cmd.Flags().IntVar(&topo.replicas, "replica_count", 2,
+ "Replica tablets per shard (includes primary)")
+ cmd.Flags().IntVar(&topo.rdonly, "rdonly_count", 1,
+ "Rdonly tablets per shard")
+
+ cmd.Flags().StringVar(&config.Charset, "charset", "utf8mb4", "MySQL charset")
+
+ cmd.Flags().StringVar(&config.PlannerVersion, "planner-version", "", "Sets the default planner to use when the session has not changed it. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right")
+
+ cmd.Flags().StringVar(&config.SnapshotFile, "snapshot_file", "",
+ "A MySQL DB snapshot file")
+
+ cmd.Flags().BoolVar(&config.EnableSystemSettings, "enable_system_settings", true, "This will enable the system settings to be changed per session at the database connection level")
+
+ cmd.Flags().StringVar(&config.TransactionMode, "transaction_mode", "MULTI", "Transaction mode MULTI (default), SINGLE or TWOPC ")
+ cmd.Flags().Float64Var(&config.TransactionTimeout, "queryserver-config-transaction-timeout", 0, "query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value")
+
+ cmd.Flags().StringVar(&config.TabletHostName, "tablet_hostname", "localhost", "The hostname to use for the tablet otherwise it will be derived from OS' hostname")
+
+ cmd.Flags().StringVar(&config.VSchemaDDLAuthorizedUsers, "vschema_ddl_authorized_users", "", "Comma separated list of users authorized to execute vschema ddl operations via vtgate")
+
+ cmd.Flags().StringVar(&config.ForeignKeyMode, "foreign_key_mode", "allow", "This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow")
+ cmd.Flags().BoolVar(&config.EnableOnlineDDL, "enable_online_ddl", true, "Allow users to submit, review and control Online DDL")
+ cmd.Flags().BoolVar(&config.EnableDirectDDL, "enable_direct_ddl", true, "Allow users to submit direct DDL statements")
+
+ // flags for using an actual topo implementation for vtcombo instead of in-memory topo. useful for test setup where an external topo server is shared across multiple vtcombo processes or other components
+ cmd.Flags().StringVar(&config.ExternalTopoImplementation, "external_topo_implementation", "", "the topology implementation to use for vtcombo process")
+ cmd.Flags().StringVar(&config.ExternalTopoGlobalServerAddress, "external_topo_global_server_address", "", "the address of the global topology server for vtcombo process")
+ cmd.Flags().StringVar(&config.ExternalTopoGlobalRoot, "external_topo_global_root", "", "the path of the global topology data in the global topology server for vtcombo process")
+
+ cmd.Flags().DurationVar(&config.VtgateTabletRefreshInterval, "tablet_refresh_interval", 10*time.Second, "Interval at which vtgate refreshes tablet information from topology server.")
+ acl.RegisterFlags(cmd.Flags())
+
+ return cmd
+}
+
+func newEnv() (env vttest.Environment, err error) {
+ if basePort != 0 {
+ if config.DataDir == "" {
+ env, err = vttest.NewLocalTestEnv("", basePort)
+ if err != nil {
+ return
+ }
+ } else {
+ env, err = vttest.NewLocalTestEnvWithDirectory("", basePort, config.DataDir)
+ if err != nil {
+ return
+ }
+ }
+ }
+
+ if protoTopo == "" {
+ config.Topology, err = topo.buildTopology()
+ if err != nil {
+ return
+ }
+ } else {
+ var topology vttestpb.VTTestTopology
+ err = prototext.Unmarshal([]byte(protoTopo), &topology)
+ if err != nil {
+ return
+ }
+ if len(topology.Cells) == 0 {
+ topology.Cells = append(topology.Cells, "test")
+ }
+ config.Topology = &topology
+ }
+
+ if doSeed {
+ config.Seed = &seed
+ }
+
+ if mycnf != "" {
+ config.ExtraMyCnf = strings.Split(mycnf, ":")
+ }
+
+ return
+}
+
+func run(cmd *cobra.Command, args []string) error {
+ cluster, err := runCluster()
+ if err != nil {
+ return err
+ }
+ defer cluster.TearDown()
+
+ servenv.Init()
+
+ kvconf := cluster.JSONConfig()
+ if err := json.NewEncoder(os.Stdout).Encode(kvconf); err != nil {
+ return err
+ }
+
+ c := make(chan os.Signal, 1)
+ signal.Notify(c, os.Interrupt, syscall.SIGTERM)
+ <-c
+
+ return nil
+}
+
+func runCluster() (cluster vttest.LocalCluster, err error) {
+ env, err := newEnv()
+ if err != nil {
+ return
+ }
+
+ log.Infof("Starting local cluster...")
+ log.Infof("config: %#v", config)
+ cluster = vttest.LocalCluster{
+ Config: config,
+ Env: env,
+ }
+ err = cluster.Setup()
+ if err != nil {
+ return cluster, err
+ }
+
+ log.Info("Local cluster started.")
+
+ return cluster, nil
+}
diff --git a/go/cmd/vttestserver/vttestserver_test.go b/go/cmd/vttestserver/cli/main_test.go
similarity index 89%
rename from go/cmd/vttestserver/vttestserver_test.go
rename to go/cmd/vttestserver/cli/main_test.go
index 0665d5f9c46..39dc8e4ea78 100644
--- a/go/cmd/vttestserver/vttestserver_test.go
+++ b/go/cmd/vttestserver/cli/main_test.go
@@ -14,14 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package cli
import (
"context"
"fmt"
"io"
"math/rand"
- "os"
"os/exec"
"path"
"strings"
@@ -54,9 +53,8 @@ type columnVindex struct {
}
func TestRunsVschemaMigrations(t *testing.T) {
- args := os.Args
conf := config
- defer resetFlags(args, conf)
+ defer resetConfig(conf)
cluster, err := startCluster()
defer cluster.TearDown()
@@ -72,17 +70,22 @@ func TestRunsVschemaMigrations(t *testing.T) {
}
func TestPersistentMode(t *testing.T) {
- args := os.Args
conf := config
- defer resetFlags(args, conf)
+ defer resetConfig(conf)
dir := t.TempDir()
cluster, err := startPersistentCluster(dir)
assert.NoError(t, err)
- // basic sanity checks similar to TestRunsVschemaMigrations
+ // Add a new "ad-hoc" vindex via vtgate once the cluster is up, to later make sure it is persisted across teardowns
+ err = addColumnVindex(cluster, "test_keyspace", "alter vschema on persistence_test add vindex my_vdx(id)")
+ assert.NoError(t, err)
+
+ // Basic sanity checks similar to TestRunsVschemaMigrations
+ // See go/cmd/vttestserver/data/schema/app_customer/* and go/cmd/vttestserver/data/schema/test_keyspace/*
assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"})
+ assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "persistence_test", vindex: "my_vdx", vindexType: "hash", column: "id"})
assertColumnVindex(t, cluster, columnVindex{keyspace: "app_customer", table: "customers", vindex: "hash", vindexType: "hash", column: "id"})
// insert some data to ensure persistence across teardowns
@@ -108,11 +111,15 @@ func TestPersistentMode(t *testing.T) {
// reboot the persistent cluster
cluster.TearDown()
cluster, err = startPersistentCluster(dir)
- defer cluster.TearDown()
+ defer func() {
+ cluster.PersistentMode = false // Cleanup the tmpdir as we're done
+ cluster.TearDown()
+ }()
assert.NoError(t, err)
- // rerun our sanity checks to make sure vschema migrations are run during every startup
+ // rerun our sanity checks to make sure vschema is persisted correctly
assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"})
+ assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "persistence_test", vindex: "my_vdx", vindexType: "hash", column: "id"})
assertColumnVindex(t, cluster, columnVindex{keyspace: "app_customer", table: "customers", vindex: "hash", vindexType: "hash", column: "id"})
// ensure previous data was successfully persisted
@@ -125,9 +132,8 @@ func TestPersistentMode(t *testing.T) {
}
func TestForeignKeysAndDDLModes(t *testing.T) {
- args := os.Args
conf := config
- defer resetFlags(args, conf)
+ defer resetConfig(conf)
cluster, err := startCluster("--foreign_key_mode=allow", "--enable_online_ddl=true", "--enable_direct_ddl=true")
assert.NoError(t, err)
@@ -180,9 +186,8 @@ func TestForeignKeysAndDDLModes(t *testing.T) {
}
func TestCanGetKeyspaces(t *testing.T) {
- args := os.Args
conf := config
- defer resetFlags(args, conf)
+ defer resetConfig(conf)
cluster, err := startCluster()
assert.NoError(t, err)
@@ -192,9 +197,8 @@ func TestCanGetKeyspaces(t *testing.T) {
}
func TestExternalTopoServerConsul(t *testing.T) {
- args := os.Args
conf := config
- defer resetFlags(args, conf)
+ defer resetConfig(conf)
// Start a single consul in the background.
cmd, serverAddr := startConsul(t)
@@ -218,9 +222,8 @@ func TestExternalTopoServerConsul(t *testing.T) {
}
func TestMtlsAuth(t *testing.T) {
- args := os.Args
conf := config
- defer resetFlags(args, conf)
+ defer resetConfig(conf)
// Our test root.
root := t.TempDir()
@@ -249,7 +252,10 @@ func TestMtlsAuth(t *testing.T) {
fmt.Sprintf("--vtctld_grpc_ca=%s", caCert),
fmt.Sprintf("--grpc_auth_mtls_allowed_substrings=%s", "CN=ClientApp"))
assert.NoError(t, err)
- defer cluster.TearDown()
+ defer func() {
+ cluster.PersistentMode = false // Cleanup the tmpdir as we're done
+ cluster.TearDown()
+ }()
// startCluster will apply vschema migrations using vtctl grpc and the clientCert.
assertColumnVindex(t, cluster, columnVindex{keyspace: "test_keyspace", table: "test_table", vindex: "my_vdx", vindexType: "hash", column: "id"})
@@ -257,9 +263,8 @@ func TestMtlsAuth(t *testing.T) {
}
func TestMtlsAuthUnauthorizedFails(t *testing.T) {
- args := os.Args
conf := config
- defer resetFlags(args, conf)
+ defer resetConfig(conf)
// Our test root.
root := t.TempDir()
@@ -309,15 +314,21 @@ var clusterKeyspaces = []string{
"app_customer",
}
-func startCluster(flags ...string) (vttest.LocalCluster, error) {
- os.Args = []string{"vttestserver"}
+func startCluster(flags ...string) (cluster vttest.LocalCluster, err error) {
+ args := []string{"vttestserver"}
schemaDirArg := "--schema_dir=data/schema"
tabletHostname := "--tablet_hostname=localhost"
keyspaceArg := "--keyspaces=" + strings.Join(clusterKeyspaces, ",")
numShardsArg := "--num_shards=2,2"
vschemaDDLAuthorizedUsers := "--vschema_ddl_authorized_users=%"
- os.Args = append(os.Args, []string{schemaDirArg, keyspaceArg, numShardsArg, tabletHostname, vschemaDDLAuthorizedUsers}...)
- os.Args = append(os.Args, flags...)
+ alsoLogToStderr := "--alsologtostderr" // better debugging
+ args = append(args, []string{schemaDirArg, keyspaceArg, numShardsArg, tabletHostname, vschemaDDLAuthorizedUsers, alsoLogToStderr}...)
+ args = append(args, flags...)
+
+ if err = New().ParseFlags(args); err != nil {
+ return
+ }
+
return runCluster()
}
@@ -370,8 +381,7 @@ func assertEqual(t *testing.T, actual string, expected string, message string) {
}
}
-func resetFlags(args []string, conf vttest.Config) {
- os.Args = args
+func resetConfig(conf vttest.Config) {
config = conf
}
diff --git a/go/cmd/vttestserver/docgen/main.go b/go/cmd/vttestserver/docgen/main.go
new file mode 100644
index 00000000000..61f982e2e56
--- /dev/null
+++ b/go/cmd/vttestserver/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/vttestserver/cli"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+ Use: "docgen [-d <dir>]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(cli.New(), dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/vttestserver/main.go b/go/cmd/vttestserver/main.go
index d15d26c2894..95e63fa8019 100644
--- a/go/cmd/vttestserver/main.go
+++ b/go/cmd/vttestserver/main.go
@@ -14,293 +14,15 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// vttestserver allows users to spawn a self-contained Vitess server for local testing/CI.
package main
import (
- "encoding/json"
- "fmt"
- "os"
- "os/signal"
- "strconv"
- "strings"
- "sync"
- "syscall"
- "time"
-
- "github.com/spf13/pflag"
- "google.golang.org/protobuf/encoding/prototext"
-
- "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/cmd/vttestserver/cli"
"vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/vttest"
-
- vttestpb "vitess.io/vitess/go/vt/proto/vttest"
)
-type topoFlags struct {
- cells []string
- keyspaces []string
- shards []string
- replicas int
- rdonly int
-}
-
-var (
- basePort int
- config vttest.Config
- doSeed bool
- mycnf string
- protoTopo string
- seed vttest.SeedConfig
- topo topoFlags
-)
-
-func registerFlags(fs *pflag.FlagSet) {
- fs.IntVar(&basePort, "port", 0,
- "Port to use for vtcombo. If this is 0, a random port will be chosen.")
-
- fs.StringVar(&protoTopo, "proto_topo", "",
- "Define the fake cluster topology as a compact text format encoded"+
- " vttest proto. See vttest.proto for more information.")
-
- fs.StringVar(&config.SchemaDir, "schema_dir", "",
- "Directory for initial schema files. Within this dir,"+
- " there should be a subdir for each keyspace. Within"+
- " each keyspace dir, each file is executed as SQL"+
- " after the database is created on each shard."+
- " If the directory contains a vschema.json file, it"+
- " will be used as the vschema for the V3 API.")
-
- fs.StringVar(&config.DefaultSchemaDir, "default_schema_dir", "",
- "Default directory for initial schema files. If no schema is found"+
- " in schema_dir, default to this location.")
-
- fs.StringVar(&config.DataDir, "data_dir", "",
- "Directory where the data files will be placed, defaults to a random "+
- "directory under /vt/vtdataroot")
-
- fs.BoolVar(&config.OnlyMySQL, "mysql_only", false,
- "If this flag is set only mysql is initialized."+
- " The rest of the vitess components are not started."+
- " Also, the output specifies the mysql unix socket"+
- " instead of the vtgate port.")
-
- fs.BoolVar(&config.PersistentMode, "persistent_mode", false,
- "If this flag is set, the MySQL data directory is not cleaned up"+
- " when LocalCluster.TearDown() is called. This is useful for running"+
- " vttestserver as a database container in local developer environments. Note"+
- " that db migration files (--schema_dir option) and seeding of"+
- " random data (--initialize_with_random_data option) will only run during"+
- " cluster startup if the data directory does not already exist. vschema"+
- " migrations are run every time the cluster starts, since persistence"+
- " for the topology server has not been implemented yet")
-
- fs.BoolVar(&doSeed, "initialize_with_random_data", false,
- "If this flag is each table-shard will be initialized"+
- " with random data. See also the 'rng_seed' and 'min_shard_size'"+
- " and 'max_shard_size' flags.")
-
- fs.IntVar(&seed.RngSeed, "rng_seed", 123,
- "The random number generator seed to use when initializing"+
- " with random data (see also --initialize_with_random_data)."+
- " Multiple runs with the same seed will result with the same"+
- " initial data.")
-
- fs.IntVar(&seed.MinSize, "min_table_shard_size", 1000,
- "The minimum number of initial rows in a table shard. Ignored if"+
- "--initialize_with_random_data is false. The actual number is chosen"+
- " randomly.")
-
- fs.IntVar(&seed.MaxSize, "max_table_shard_size", 10000,
- "The maximum number of initial rows in a table shard. Ignored if"+
- "--initialize_with_random_data is false. The actual number is chosen"+
- " randomly")
-
- fs.Float64Var(&seed.NullProbability, "null_probability", 0.1,
- "The probability to initialize a field with 'NULL' "+
- " if --initialize_with_random_data is true. Only applies to fields"+
- " that can contain NULL values.")
-
- fs.StringVar(&config.MySQLBindHost, "mysql_bind_host", "localhost",
- "which host to bind vtgate mysql listener to")
-
- fs.StringVar(&mycnf, "extra_my_cnf", "",
- "extra files to add to the config, separated by ':'")
-
- fs.StringSliceVar(&topo.cells, "cells", []string{"test"}, "Comma separated list of cells")
- fs.StringSliceVar(&topo.keyspaces, "keyspaces", []string{"test_keyspace"},
- "Comma separated list of keyspaces")
- fs.StringSliceVar(&topo.shards, "num_shards", []string{"2"},
- "Comma separated shard count (one per keyspace)")
- fs.IntVar(&topo.replicas, "replica_count", 2,
- "Replica tablets per shard (includes primary)")
- fs.IntVar(&topo.rdonly, "rdonly_count", 1,
- "Rdonly tablets per shard")
-
- fs.StringVar(&config.Charset, "charset", "utf8mb4", "MySQL charset")
-
- fs.StringVar(&config.PlannerVersion, "planner-version", "", "Sets the default planner to use when the session has not changed it. Valid values are: V3, V3Insert, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the new gen4 planner and falls back to the V3 planner if the gen4 fails.")
-
- fs.StringVar(&config.SnapshotFile, "snapshot_file", "",
- "A MySQL DB snapshot file")
-
- fs.BoolVar(&config.EnableSystemSettings, "enable_system_settings", true, "This will enable the system settings to be changed per session at the database connection level")
-
- fs.StringVar(&config.TransactionMode, "transaction_mode", "MULTI", "Transaction mode MULTI (default), SINGLE or TWOPC ")
- fs.Float64Var(&config.TransactionTimeout, "queryserver-config-transaction-timeout", 0, "query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value")
-
- fs.StringVar(&config.TabletHostName, "tablet_hostname", "localhost", "The hostname to use for the tablet otherwise it will be derived from OS' hostname")
-
- fs.StringVar(&config.VSchemaDDLAuthorizedUsers, "vschema_ddl_authorized_users", "", "Comma separated list of users authorized to execute vschema ddl operations via vtgate")
-
- fs.StringVar(&config.ForeignKeyMode, "foreign_key_mode", "allow", "This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow")
- fs.BoolVar(&config.EnableOnlineDDL, "enable_online_ddl", true, "Allow users to submit, review and control Online DDL")
- fs.BoolVar(&config.EnableDirectDDL, "enable_direct_ddl", true, "Allow users to submit direct DDL statements")
-
- // flags for using an actual topo implementation for vtcombo instead of in-memory topo. useful for test setup where an external topo server is shared across multiple vtcombo processes or other components
- fs.StringVar(&config.ExternalTopoImplementation, "external_topo_implementation", "", "the topology implementation to use for vtcombo process")
- fs.StringVar(&config.ExternalTopoGlobalServerAddress, "external_topo_global_server_address", "", "the address of the global topology server for vtcombo process")
- fs.StringVar(&config.ExternalTopoGlobalRoot, "external_topo_global_root", "", "the path of the global topology data in the global topology server for vtcombo process")
-
- fs.DurationVar(&config.VtgateTabletRefreshInterval, "tablet_refresh_interval", 10*time.Second, "Interval at which vtgate refreshes tablet information from topology server.")
- acl.RegisterFlags(fs)
-}
-
-func init() {
- servenv.OnParseFor("vttestserver", registerFlags)
-}
-
-func (t *topoFlags) buildTopology() (*vttestpb.VTTestTopology, error) {
- topo := &vttestpb.VTTestTopology{}
- topo.Cells = t.cells
-
- keyspaces := t.keyspaces
- shardCounts := t.shards
- if len(keyspaces) != len(shardCounts) {
- return nil, fmt.Errorf("--keyspaces must be same length as --shards")
- }
-
- for i := range keyspaces {
- name := keyspaces[i]
- numshards, err := strconv.ParseInt(shardCounts[i], 10, 32)
- if err != nil {
- return nil, err
- }
-
- ks := &vttestpb.Keyspace{
- Name: name,
- ReplicaCount: int32(t.replicas),
- RdonlyCount: int32(t.rdonly),
- }
-
- for _, shardname := range vttest.GetShardNames(int(numshards)) {
- ks.Shards = append(ks.Shards, &vttestpb.Shard{
- Name: shardname,
- })
- }
-
- topo.Keyspaces = append(topo.Keyspaces, ks)
- }
-
- return topo, nil
-}
-
-// Annoying, but in unit tests, parseFlags gets called multiple times per process
-// (anytime startCluster is called), so we need to guard against the second test
-// to run failing with, for example:
-//
-// flag redefined: log_rotate_max_size
-var flagsOnce sync.Once
-
-func parseFlags() (env vttest.Environment, err error) {
- flagsOnce.Do(func() {
- servenv.RegisterFlags()
- servenv.RegisterGRPCServerFlags()
- servenv.RegisterGRPCServerAuthFlags()
- servenv.RegisterServiceMapFlag()
- })
-
- servenv.ParseFlags("vttestserver")
-
- if basePort != 0 {
- if config.DataDir == "" {
- env, err = vttest.NewLocalTestEnv("", basePort)
- if err != nil {
- return
- }
- } else {
- env, err = vttest.NewLocalTestEnvWithDirectory("", basePort, config.DataDir)
- if err != nil {
- return
- }
- }
- }
-
- if protoTopo == "" {
- config.Topology, err = topo.buildTopology()
- if err != nil {
- return
- }
- } else {
- var topology vttestpb.VTTestTopology
- err = prototext.Unmarshal([]byte(protoTopo), &topology)
- if err != nil {
- return
- }
- if len(topology.Cells) == 0 {
- topology.Cells = append(topology.Cells, "test")
- }
- config.Topology = &topology
- }
-
- if doSeed {
- config.Seed = &seed
- }
-
- if mycnf != "" {
- config.ExtraMyCnf = strings.Split(mycnf, ":")
- }
-
- return
-}
-
func main() {
- cluster, err := runCluster()
- servenv.Init()
- if err != nil {
+ if err := cli.New().Execute(); err != nil {
log.Fatal(err)
}
- defer cluster.TearDown()
-
- kvconf := cluster.JSONConfig()
- if err := json.NewEncoder(os.Stdout).Encode(kvconf); err != nil {
- log.Fatal(err)
- }
-
- c := make(chan os.Signal, 1)
- signal.Notify(c, os.Interrupt, syscall.SIGTERM)
- <-c
-}
-
-func runCluster() (vttest.LocalCluster, error) {
- env, err := parseFlags()
- if err != nil {
- log.Fatal(err)
- }
- log.Infof("Starting local cluster...")
- log.Infof("config: %#v", config)
- cluster := vttest.LocalCluster{
- Config: config,
- Env: env,
- }
- err = cluster.Setup()
- if err != nil {
- return cluster, err
- }
-
- log.Info("Local cluster started.")
-
- return cluster, nil
}
diff --git a/go/cmd/vttlstest/cli/vttlstest.go b/go/cmd/vttlstest/cli/vttlstest.go
new file mode 100644
index 00000000000..4e0f9c2b95e
--- /dev/null
+++ b/go/cmd/vttlstest/cli/vttlstest.go
@@ -0,0 +1,135 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/vt/tlstest"
+)
+
+var (
+ root = "."
+ parent = "ca"
+ serial = "01"
+ commonName string
+
+ Root = &cobra.Command{
+ Use: "vttlstest",
+ Short: "vttlstest is a tool for generating test certificates, keys, and related artifacts for TLS tests.",
+ Long: "vttlstest is a tool for generating test certificates, keys, and related artifacts for TLS tests.",
+ }
+
+ createCACmd = &cobra.Command{
+ Use: "CreateCA [--root <dir>]",
+ DisableFlagsInUseLine: true,
+ Example: "CreateCA --root /tmp",
+ Short: "Create certificate authority",
+ Long: "Create certificate authority",
+ Args: cobra.NoArgs,
+ Run: runCreateCA,
+ }
+
+ createIntermediateCACmd = &cobra.Command{
+ Use: "CreateIntermediateCA [--root <dir>] [--parent <name>] [--serial <serial>] [--common-name <CN>] <name>",
+ DisableFlagsInUseLine: true,
+ Example: "CreateIntermediateCA --root /tmp --parent ca mail.mycoolsite.com",
+ Short: "Create intermediate certificate authority",
+ Long: "Create intermediate certificate authority",
+ Args: cobra.ExactArgs(1),
+ Run: runCreateIntermediateCA,
+ }
+
+ createCRLCmd = &cobra.Command{
+ Use: "CreateCRL [--root <dir>] <server>",
+ DisableFlagsInUseLine: true,
+ Example: "CreateCRL --root /tmp mail.mycoolsite.com",
+ Short: "Create certificate revocation list",
+ Long: "Create certificate revocation list",
+ Args: cobra.ExactArgs(1),
+ Run: runCreateCRL,
+ }
+
+ createSignedCertCmd = &cobra.Command{
+ Use: "CreateSignedCert [--root <dir>] [--parent <name>] [--serial <serial>] [--common-name <CN>] <name>",
+ DisableFlagsInUseLine: true,
+ Example: "CreateSignedCert --root /tmp --common-name mail.mysite.com --parent mail.mycoolsite.com postman1",
+ Short: "Create signed certificate",
+ Long: "Create signed certificate",
+ Args: cobra.ExactArgs(1),
+ Run: runCreateSignedCert,
+ }
+
+ revokeCertCmd = &cobra.Command{
+ Use: "RevokeCert [--root <dir>] [--parent <name>] <name>",
+ DisableFlagsInUseLine: true,
+ Example: "RevokeCert --root /tmp --parent mail.mycoolsite.com postman1",
+ Short: "Revoke a certificate",
+ Long: "Revoke a certificate",
+ Args: cobra.ExactArgs(1),
+ Run: runRevokeCert,
+ }
+)
+
+func init() {
+ Root.PersistentFlags().StringVar(&root, "root", root, "root directory for all artifacts")
+
+ Root.AddCommand(createCACmd)
+ Root.AddCommand(createIntermediateCACmd)
+ Root.AddCommand(createCRLCmd)
+ Root.AddCommand(createSignedCertCmd)
+ Root.AddCommand(revokeCertCmd)
+
+ for _, cmd := range []*cobra.Command{createIntermediateCACmd, createSignedCertCmd} {
+ cmd.Flags().StringVar(&parent, "parent", parent, "Parent cert name to use. Use 'ca' for the toplevel CA.")
+ cmd.Flags().StringVar(&serial, "serial", serial, "Serial number for the certificate to create. Should be different for two certificates with the same parent.")
+ cmd.Flags().StringVar(&commonName, "common-name", commonName, "Common name for the certificate. If empty, uses the name.")
+ }
+ revokeCertCmd.Flags().StringVar(&parent, "parent", parent, "Parent cert name to use. Use 'ca' for the toplevel CA.")
+}
+
+func runCreateCA(cmd *cobra.Command, args []string) {
+ tlstest.CreateCA(root)
+}
+
+func runCreateIntermediateCA(cmd *cobra.Command, args []string) {
+ name := args[0]
+ if commonName == "" {
+ commonName = name
+ }
+
+ tlstest.CreateIntermediateCA(root, parent, serial, name, commonName)
+}
+
+func runCreateCRL(cmd *cobra.Command, args []string) {
+ ca := args[0]
+ tlstest.CreateCRL(root, ca)
+}
+
+func runCreateSignedCert(cmd *cobra.Command, args []string) {
+ name := args[0]
+ if commonName == "" {
+ commonName = name
+ }
+
+ tlstest.CreateSignedCert(root, parent, serial, name, commonName)
+}
+
+func runRevokeCert(cmd *cobra.Command, args []string) {
+ name := args[0]
+ tlstest.RevokeCertAndRegenerateCRL(root, parent, name)
+}
diff --git a/go/cmd/vttlstest/docgen/main.go b/go/cmd/vttlstest/docgen/main.go
new file mode 100644
index 00000000000..2354dceb493
--- /dev/null
+++ b/go/cmd/vttlstest/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/vttlstest/cli"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+ Use:  "docgen [-d <dir>]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(cli.Root, dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/vttlstest/vttlstest.go b/go/cmd/vttlstest/vttlstest.go
index 78bffb813a3..08e994c096d 100644
--- a/go/cmd/vttlstest/vttlstest.go
+++ b/go/cmd/vttlstest/vttlstest.go
@@ -19,126 +19,14 @@ package main
import (
"github.com/spf13/cobra"
+ "vitess.io/vitess/go/cmd/vttlstest/cli"
"vitess.io/vitess/go/exit"
"vitess.io/vitess/go/vt/logutil"
- "vitess.io/vitess/go/vt/tlstest"
)
-var (
- root = "."
- parent = "ca"
- serial = "01"
- commonName string
-
- rootCmd = &cobra.Command{
- Use: "vttlstest",
- Short: "vttlstest is a tool for generating test certificates, keys, and related artifacts for TLS tests.",
- Long: "vttlstest is a tool for generating test certificates, keys, and related artifacts for TLS tests.",
- }
-
- createCACmd = &cobra.Command{
- Use: "CreateCA [--root ]",
- DisableFlagsInUseLine: true,
- Example: "CreateCA --root /tmp",
- Short: "Create certificate authority",
- Long: "Create certificate authority",
- Args: cobra.NoArgs,
- Run: runCreateCA,
- }
-
- createIntermediateCACmd = &cobra.Command{
- Use: "CreateIntermediateCA [--root ] [--parent ] [--serial ] [--common-name ] ",
- DisableFlagsInUseLine: true,
- Example: "CreateIntermediateCA --root /tmp --parent ca mail.mycoolsite.com",
- Short: "Create intermediate certificate authority",
- Long: "Create intermediate certificate authority",
- Args: cobra.ExactArgs(1),
- Run: runCreateIntermediateCA,
- }
-
- createCRLCmd = &cobra.Command{
- Use: "CreateCRL [--root ] ",
- DisableFlagsInUseLine: true,
- Example: "CreateCRL --root /tmp mail.mycoolsite.com",
- Short: "Create certificate revocation list",
- Long: "Create certificate revocation list",
- Args: cobra.ExactArgs(1),
- Run: runCreateCRL,
- }
-
- createSignedCertCmd = &cobra.Command{
- Use: "CreateSignedCert [--root ] [--parent ] [--serial ] [--common-name ] ",
- DisableFlagsInUseLine: true,
- Example: "CreateSignedCert --root /tmp --common-name mail.mysite.com --parent mail.mycoolsite.com postman1",
- Short: "Create signed certificate",
- Long: "Create signed certificate",
- Args: cobra.ExactArgs(1),
- Run: runCreateSignedCert,
- }
-
- revokeCertCmd = &cobra.Command{
- Use: "RevokeCert [--root ] [--parent ] ",
- DisableFlagsInUseLine: true,
- Example: "RevokeCert --root /tmp --parent mail.mycoolsite.com postman1",
- Short: "Revoke a certificate",
- Long: "Revoke a certificate",
- Args: cobra.ExactArgs(1),
- Run: runRevokeCert,
- }
-)
-
-func init() {
- rootCmd.PersistentFlags().StringVar(&root, "root", root, "root directory for all artifacts")
-
- rootCmd.AddCommand(createCACmd)
- rootCmd.AddCommand(createIntermediateCACmd)
- rootCmd.AddCommand(createCRLCmd)
- rootCmd.AddCommand(createSignedCertCmd)
- rootCmd.AddCommand(revokeCertCmd)
-
- for _, cmd := range []*cobra.Command{createIntermediateCACmd, createSignedCertCmd} {
- cmd.Flags().StringVar(&parent, "parent", parent, "Parent cert name to use. Use 'ca' for the toplevel CA.")
- cmd.Flags().StringVar(&serial, "serial", serial, "Serial number for the certificate to create. Should be different for two certificates with the same parent.")
- cmd.Flags().StringVar(&commonName, "common-name", commonName, "Common name for the certificate. If empty, uses the name.")
- }
- revokeCertCmd.Flags().StringVar(&parent, "parent", parent, "Parent cert name to use. Use 'ca' for the toplevel CA.")
-}
-
-func runCreateCA(cmd *cobra.Command, args []string) {
- tlstest.CreateCA(root)
-}
-
-func runCreateIntermediateCA(cmd *cobra.Command, args []string) {
- name := args[0]
- if commonName == "" {
- commonName = name
- }
-
- tlstest.CreateIntermediateCA(root, parent, serial, name, commonName)
-}
-
-func runCreateCRL(cmd *cobra.Command, args []string) {
- ca := args[0]
- tlstest.CreateCRL(root, ca)
-}
-
-func runCreateSignedCert(cmd *cobra.Command, args []string) {
- name := args[0]
- if commonName == "" {
- commonName = name
- }
-
- tlstest.CreateSignedCert(root, parent, serial, name, commonName)
-}
-
-func runRevokeCert(cmd *cobra.Command, args []string) {
- name := args[0]
- tlstest.RevokeCertAndRegenerateCRL(root, parent, name)
-}
-
func main() {
defer exit.Recover()
defer logutil.Flush()
- cobra.CheckErr(rootCmd.Execute())
+ cobra.CheckErr(cli.Root.Execute())
}
diff --git a/go/cmd/vtorc/status.go b/go/cmd/zk/command/add_auth.go
similarity index 56%
rename from go/cmd/vtorc/status.go
rename to go/cmd/zk/command/add_auth.go
index a4d8a59d3fc..566c463f4a8 100644
--- a/go/cmd/vtorc/status.go
+++ b/go/cmd/zk/command/add_auth.go
@@ -1,5 +1,5 @@
/*
-Copyright 2022 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,17 +14,23 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package main
+package command
import (
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/vtorc/logic"
+ "github.com/spf13/cobra"
)
-// addStatusParts adds UI parts to the /debug/status page of VTOrc
-func addStatusParts() {
- servenv.AddStatusPart("Recent Recoveries", logic.TopologyRecoveriesTemplate, func() any {
- recoveries, _ := logic.ReadRecentRecoveries(false, 0)
- return recoveries
- })
+var AddAuth = &cobra.Command{
+ Use:  "addAuth <scheme> <auth>",
+ Args: cobra.ExactArgs(2),
+ RunE: commandAddAuth,
+}
+
+func commandAddAuth(cmd *cobra.Command, args []string) error {
+ scheme, auth := cmd.Flags().Arg(0), cmd.Flags().Arg(1)
+ return fs.Conn.AddAuth(cmd.Context(), scheme, []byte(auth))
+}
+
+func init() {
+ Root.AddCommand(AddAuth)
}
diff --git a/go/cmd/zk/command/cat.go b/go/cmd/zk/command/cat.go
new file mode 100644
index 00000000000..1d5460f7006
--- /dev/null
+++ b/go/cmd/zk/command/cat.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+ "github.com/z-division/go-zookeeper/zk"
+ "golang.org/x/term"
+
+ "vitess.io/vitess/go/cmd/zk/internal/zkfilepath"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/topo"
+ "vitess.io/vitess/go/vt/topo/zk2topo"
+)
+
+var (
+ catArgs = struct {
+ LongListing bool
+ Force bool
+ DecodeProto bool
+ }{}
+
+ Cat = &cobra.Command{
+ Use: "cat <path1> [<path2> ...]",
+ Example: `zk cat /zk/path
+
+# List filename before file data
+zk cat -l /zk/path1 /zk/path2`,
+ Args: cobra.MinimumNArgs(1),
+ RunE: commandCat,
+ }
+)
+
+func commandCat(cmd *cobra.Command, args []string) error {
+ resolved, err := zk2topo.ResolveWildcards(cmd.Context(), fs.Conn, cmd.Flags().Args())
+ if err != nil {
+ return fmt.Errorf("cat: invalid wildcards: %w", err)
+ }
+ if len(resolved) == 0 {
+ // the wildcards didn't result in anything, we're done
+ return nil
+ }
+
+ hasError := false
+ for _, arg := range resolved {
+ zkPath := zkfilepath.Clean(arg)
+ data, _, err := fs.Conn.Get(cmd.Context(), zkPath)
+ if err != nil {
+ hasError = true
+ if !catArgs.Force || err != zk.ErrNoNode {
+ log.Warningf("cat: cannot access %v: %v", zkPath, err)
+ }
+ continue
+ }
+
+ if catArgs.LongListing {
+ fmt.Printf("%v:\n", zkPath)
+ }
+ decoded := ""
+ if catArgs.DecodeProto {
+ decoded, err = topo.DecodeContent(zkPath, data, false)
+ if err != nil {
+ log.Warningf("cat: cannot proto decode %v: %v", zkPath, err)
+ decoded = string(data)
+ }
+ } else {
+ decoded = string(data)
+ }
+ fmt.Print(decoded)
+ if len(decoded) > 0 && decoded[len(decoded)-1] != '\n' && (term.IsTerminal(int(os.Stdout.Fd())) || catArgs.LongListing) {
+ fmt.Print("\n")
+ }
+ }
+ if hasError {
+ return fmt.Errorf("cat: some paths had errors")
+ }
+ return nil
+}
+
+func init() {
+ Cat.Flags().BoolVarP(&catArgs.LongListing, "longListing", "l", false, "long listing")
+ Cat.Flags().BoolVarP(&catArgs.Force, "force", "f", false, "no warning on nonexistent node")
+ Cat.Flags().BoolVarP(&catArgs.DecodeProto, "decodeProto", "p", false, "decode proto files and display them as text")
+
+ Root.AddCommand(Cat)
+}
diff --git a/go/cmd/zk/command/chmod.go b/go/cmd/zk/command/chmod.go
new file mode 100644
index 00000000000..39125d618c4
--- /dev/null
+++ b/go/cmd/zk/command/chmod.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/zk/internal/zkfilepath"
+ "vitess.io/vitess/go/cmd/zk/internal/zkfs"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/topo/zk2topo"
+)
+
+var Chmod = &cobra.Command{
+ Use: "chmod <mode> <path>",
+ Example: `zk chmod n-mode /zk/path
+zk chmod n+mode /zk/path`,
+ Args: cobra.MinimumNArgs(2),
+ RunE: commandChmod,
+}
+
+func commandChmod(cmd *cobra.Command, args []string) error {
+ mode := cmd.Flags().Arg(0)
+ if mode[0] != 'n' {
+ return fmt.Errorf("chmod: invalid mode")
+ }
+
+ addPerms := false
+ if mode[1] == '+' {
+ addPerms = true
+ } else if mode[1] != '-' {
+ return fmt.Errorf("chmod: invalid mode")
+ }
+
+ permMask := zkfs.ParsePermMode(mode[2:])
+
+ resolved, err := zk2topo.ResolveWildcards(cmd.Context(), fs.Conn, cmd.Flags().Args()[1:])
+ if err != nil {
+ return fmt.Errorf("chmod: invalid wildcards: %w", err)
+ }
+ if len(resolved) == 0 {
+ // the wildcards didn't result in anything, we're done
+ return nil
+ }
+
+ hasError := false
+ for _, arg := range resolved {
+ zkPath := zkfilepath.Clean(arg)
+ aclv, _, err := fs.Conn.GetACL(cmd.Context(), zkPath)
+ if err != nil {
+ hasError = true
+ log.Warningf("chmod: cannot set access %v: %v", zkPath, err)
+ continue
+ }
+ if addPerms {
+ aclv[0].Perms |= permMask
+ } else {
+ aclv[0].Perms &= ^permMask
+ }
+ err = fs.Conn.SetACL(cmd.Context(), zkPath, aclv, -1)
+ if err != nil {
+ hasError = true
+ log.Warningf("chmod: cannot set access %v: %v", zkPath, err)
+ continue
+ }
+ }
+ if hasError {
+ return fmt.Errorf("chmod: some paths had errors")
+ }
+ return nil
+}
+
+func init() {
+ Root.AddCommand(Chmod)
+}
diff --git a/go/cmd/zk/command/cp.go b/go/cmd/zk/command/cp.go
new file mode 100644
index 00000000000..e89486413ea
--- /dev/null
+++ b/go/cmd/zk/command/cp.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import "github.com/spf13/cobra"
+
+var Cp = &cobra.Command{
+ Use: "cp <src> <dst>",
+ Example: `zk cp /zk/path .
+zk cp ./config /zk/path/config
+
+# Trailing slash indicates directory
+zk cp ./config /zk/path/`,
+ Args: cobra.MinimumNArgs(2),
+ RunE: commandCp,
+}
+
+func commandCp(cmd *cobra.Command, args []string) error {
+ switch cmd.Flags().NArg() {
+ case 2:
+ return fs.CopyContext(cmd.Context(), cmd.Flags().Arg(0), cmd.Flags().Arg(1))
+ default:
+ return fs.MultiCopyContext(cmd.Context(), cmd.Flags().Args())
+ }
+}
+
+func init() {
+ Root.AddCommand(Cp)
+}
diff --git a/go/cmd/zk/command/edit.go b/go/cmd/zk/command/edit.go
new file mode 100644
index 00000000000..ec4b74c4b62
--- /dev/null
+++ b/go/cmd/zk/command/edit.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "os/exec"
+ "path"
+ "time"
+
+ "github.com/spf13/cobra"
+ "github.com/z-division/go-zookeeper/zk"
+
+ "vitess.io/vitess/go/cmd/zk/internal/zkfilepath"
+ "vitess.io/vitess/go/vt/log"
+)
+
+var (
+ editArgs = struct {
+ Force bool
+ }{}
+
+ Edit = &cobra.Command{
+ Use:  "edit <path>",
+ Short: "Create a local copy, edit, and write changes back to cell.",
+ Args: cobra.ExactArgs(1),
+ RunE: commandEdit,
+ }
+)
+
+func commandEdit(cmd *cobra.Command, args []string) error {
+ arg := cmd.Flags().Arg(0)
+ zkPath := zkfilepath.Clean(arg)
+ data, stat, err := fs.Conn.Get(cmd.Context(), zkPath)
+ if err != nil {
+ if !editArgs.Force || err != zk.ErrNoNode {
+ log.Warningf("edit: cannot access %v: %v", zkPath, err)
+ }
+ return fmt.Errorf("edit: cannot access %v: %v", zkPath, err)
+ }
+
+ name := path.Base(zkPath)
+ tmpPath := fmt.Sprintf("/tmp/zk-edit-%v-%v", name, time.Now().UnixNano())
+ f, err := os.Create(tmpPath)
+ if err == nil {
+ _, err = f.Write(data)
+ f.Close()
+ }
+ if err != nil {
+ return fmt.Errorf("edit: cannot write file %v", err)
+ }
+
+ editor := exec.Command(os.Getenv("EDITOR"), tmpPath)
+ editor.Stdin = os.Stdin
+ editor.Stdout = os.Stdout
+ editor.Stderr = os.Stderr
+ err = editor.Run()
+ if err != nil {
+ os.Remove(tmpPath)
+ return fmt.Errorf("edit: cannot start $EDITOR: %v", err)
+ }
+
+ fileData, err := os.ReadFile(tmpPath)
+ if err != nil {
+ os.Remove(tmpPath)
+ return fmt.Errorf("edit: cannot read file %v", err)
+ }
+
+ if !bytes.Equal(fileData, data) {
+ // data changed - update if we can
+ _, err = fs.Conn.Set(cmd.Context(), zkPath, fileData, stat.Version)
+ if err != nil {
+ os.Remove(tmpPath)
+ return fmt.Errorf("edit: cannot write zk file %v", err)
+ }
+ }
+ os.Remove(tmpPath)
+ return nil
+}
+
+func init() {
+ Edit.Flags().BoolVarP(&editArgs.Force, "force", "f", false, "no warning on nonexistent node")
+
+ Root.AddCommand(Edit)
+}
diff --git a/go/cmd/zk/command/ls.go b/go/cmd/zk/command/ls.go
new file mode 100644
index 00000000000..83c1d31363b
--- /dev/null
+++ b/go/cmd/zk/command/ls.go
@@ -0,0 +1,153 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "fmt"
+ "path"
+ "sort"
+ "sync"
+
+ "github.com/spf13/cobra"
+ "github.com/z-division/go-zookeeper/zk"
+
+ "vitess.io/vitess/go/cmd/zk/internal/zkfilepath"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/topo/zk2topo"
+)
+
+var (
+ lsArgs = struct {
+ LongListing bool
+ DirectoryListing bool
+ Force bool
+ RecursiveListing bool
+ }{}
+
+ Ls = &cobra.Command{
+ Use: "ls <path>",
+ Example: `zk ls /zk
+zk ls -l /zk
+
+# List directory node itself
+zk ls -ld /zk
+
+# Recursive (expensive)
+zk ls -R /zk`,
+ Args: cobra.MinimumNArgs(1),
+ RunE: commandLs,
+ }
+)
+
+func commandLs(cmd *cobra.Command, args []string) error {
+ resolved, err := zk2topo.ResolveWildcards(cmd.Context(), fs.Conn, cmd.Flags().Args())
+ if err != nil {
+ return fmt.Errorf("ls: invalid wildcards: %v", err)
+ }
+ if len(resolved) == 0 {
+ // the wildcards didn't result in anything, we're
+ // done.
+ return nil
+ }
+
+ hasError := false
+ needsHeader := len(resolved) > 1 && !lsArgs.DirectoryListing
+ for _, arg := range resolved {
+ zkPath := zkfilepath.Clean(arg)
+ var children []string
+ var err error
+ isDir := true
+ if lsArgs.DirectoryListing {
+ children = []string{""}
+ isDir = false
+ } else if lsArgs.RecursiveListing {
+ children, err = zk2topo.ChildrenRecursive(cmd.Context(), fs.Conn, zkPath)
+ } else {
+ children, _, err = fs.Conn.Children(cmd.Context(), zkPath)
+ // Assume this is a file node if it has no children.
+ if len(children) == 0 {
+ children = []string{""}
+ isDir = false
+ }
+ }
+ if err != nil {
+ hasError = true
+ if !lsArgs.Force || err != zk.ErrNoNode {
+ log.Warningf("ls: cannot access %v: %v", zkPath, err)
+ }
+ }
+
+ // Show the full path when it helps.
+ showFullPath := false
+ if lsArgs.RecursiveListing {
+ showFullPath = true
+ } else if lsArgs.LongListing && (lsArgs.DirectoryListing || !isDir) {
+ showFullPath = true
+ }
+ if needsHeader {
+ fmt.Printf("%v:\n", zkPath)
+ }
+ if len(children) > 0 {
+ if lsArgs.LongListing && isDir {
+ fmt.Printf("total: %v\n", len(children))
+ }
+ sort.Strings(children)
+ stats := make([]*zk.Stat, len(children))
+ wg := sync.WaitGroup{}
+ f := func(i int) {
+ localPath := path.Join(zkPath, children[i])
+ _, stat, err := fs.Conn.Exists(cmd.Context(), localPath)
+ if err != nil {
+ if !lsArgs.Force || err != zk.ErrNoNode {
+ log.Warningf("ls: cannot access: %v: %v", localPath, err)
+ }
+ } else {
+ stats[i] = stat
+ }
+ wg.Done()
+ }
+ for i := range children {
+ wg.Add(1)
+ go f(i)
+ }
+ wg.Wait()
+
+ for i, child := range children {
+ localPath := path.Join(zkPath, child)
+ if stat := stats[i]; stat != nil {
+ fmt.Println(zkfilepath.Format(stat, localPath, showFullPath, lsArgs.LongListing))
+ }
+ }
+ }
+ if needsHeader {
+ fmt.Println()
+ }
+ }
+ if hasError {
+ return fmt.Errorf("ls: some paths had errors")
+ }
+ return nil
+}
+
+func init() {
+ Ls.Flags().BoolVarP(&lsArgs.LongListing, "longlisting", "l", false, "long listing")
+ Ls.Flags().BoolVarP(&lsArgs.DirectoryListing, "directorylisting", "d", false, "list directory instead of contents")
+ Ls.Flags().BoolVarP(&lsArgs.Force, "force", "f", false, "no warning on nonexistent node")
+ Ls.Flags().BoolVarP(&lsArgs.RecursiveListing, "recursivelisting", "R", false, "recursive listing")
+
+ Root.AddCommand(Ls)
+}
diff --git a/go/cmd/zk/command/rm.go b/go/cmd/zk/command/rm.go
new file mode 100644
index 00000000000..5e5b5f4c494
--- /dev/null
+++ b/go/cmd/zk/command/rm.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/spf13/cobra"
+ "github.com/z-division/go-zookeeper/zk"
+
+ "vitess.io/vitess/go/cmd/zk/internal/zkfilepath"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/topo/zk2topo"
+)
+
+var (
+ rmArgs = struct {
+ Force bool
+ RecursiveDelete bool
+ }{}
+
+ Rm = &cobra.Command{
+ Use: "rm <path>",
+ Example: `zk rm /zk/path
+
+# Recursive.
+zk rm -R /zk/path
+
+# No error on nonexistent node.
+zk rm -f /zk/path`,
+ Args: cobra.MinimumNArgs(1),
+ RunE: commandRm,
+ }
+)
+
+func commandRm(cmd *cobra.Command, args []string) error {
+ if rmArgs.RecursiveDelete {
+ for _, arg := range cmd.Flags().Args() {
+ zkPath := zkfilepath.Clean(arg)
+ if strings.Count(zkPath, "/") < 2 {
+ return fmt.Errorf("rm: overly general path: %v", zkPath)
+ }
+ }
+ }
+
+ resolved, err := zk2topo.ResolveWildcards(cmd.Context(), fs.Conn, cmd.Flags().Args())
+ if err != nil {
+ return fmt.Errorf("rm: invalid wildcards: %v", err)
+ }
+ if len(resolved) == 0 {
+ // the wildcards didn't result in anything, we're done
+ return nil
+ }
+
+ hasError := false
+ for _, arg := range resolved {
+ zkPath := zkfilepath.Clean(arg)
+ var err error
+ if rmArgs.RecursiveDelete {
+ err = zk2topo.DeleteRecursive(cmd.Context(), fs.Conn, zkPath, -1)
+ } else {
+ err = fs.Conn.Delete(cmd.Context(), zkPath, -1)
+ }
+ if err != nil && (!rmArgs.Force || err != zk.ErrNoNode) {
+ hasError = true
+ log.Warningf("rm: cannot delete %v: %v", zkPath, err)
+ }
+ }
+ if hasError {
+ // to be consistent with the command line 'rm -f', return
+ // 0 if using 'zk rm -f' and the file doesn't exist.
+ return fmt.Errorf("rm: some paths had errors")
+ }
+ return nil
+}
+
+func init() {
+ Rm.Flags().BoolVarP(&rmArgs.Force, "force", "f", false, "no warning on nonexistent node")
+ Rm.Flags().BoolVarP(&rmArgs.RecursiveDelete, "recursivedelete", "r", false, "recursive delete")
+
+ Root.AddCommand(Rm)
+}
diff --git a/go/cmd/zk/command/root.go b/go/cmd/zk/command/root.go
new file mode 100644
index 00000000000..f3f02e7d4f2
--- /dev/null
+++ b/go/cmd/zk/command/root.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/cmd/zk/internal/zkfs"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/topo/zk2topo"
+)
+
+var (
+ fs *zkfs.FS
+ server string
+
+ Root = &cobra.Command{
+ Use: "zk",
+ Short: "zk is a tool for wrangling zookeeper.",
+ Long: `zk is a tool for wrangling zookeeper.
+
+It tries to mimic unix file system commands wherever possible, but
+there are some slight differences in flag handling.
+
+The zk tool looks for the address of the cluster in /etc/zookeeper/zk_client.conf,
+or the file specified in the ZK_CLIENT_CONFIG environment variable.
+
+The local cell may be overridden with the ZK_CLIENT_LOCAL_CELL environment
+variable.`,
+ PersistentPreRun: func(cmd *cobra.Command, args []string) {
+ logutil.PurgeLogs()
+
+ // Connect to the server.
+ fs = &zkfs.FS{
+ Conn: zk2topo.Connect(server),
+ }
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ logutil.Flush()
+ },
+ }
+)
+
+func init() {
+ Root.Flags().StringVar(&server, "server", server, "server(s) to connect to")
+
+ log.RegisterFlags(Root.Flags())
+ logutil.RegisterFlags(Root.Flags())
+ acl.RegisterFlags(Root.Flags())
+}
diff --git a/go/cmd/zk/command/stat.go b/go/cmd/zk/command/stat.go
new file mode 100644
index 00000000000..713a68a3d4e
--- /dev/null
+++ b/go/cmd/zk/command/stat.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+ "github.com/z-division/go-zookeeper/zk"
+
+ "vitess.io/vitess/go/cmd/zk/internal/zkfilepath"
+ "vitess.io/vitess/go/cmd/zk/internal/zkfs"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/topo/zk2topo"
+)
+
+var (
+ statArgs = struct {
+ Force bool
+ }{}
+ Stat = &cobra.Command{
+ Use:  "stat <path>",
+ Args: cobra.MinimumNArgs(1),
+ RunE: commandStat,
+ }
+)
+
+func commandStat(cmd *cobra.Command, args []string) error {
+ resolved, err := zk2topo.ResolveWildcards(cmd.Context(), fs.Conn, cmd.Flags().Args())
+ if err != nil {
+ return fmt.Errorf("stat: invalid wildcards: %v", err)
+ }
+ if len(resolved) == 0 {
+ // the wildcards didn't result in anything, we're done
+ return nil
+ }
+
+ hasError := false
+ for _, arg := range resolved {
+ zkPath := zkfilepath.Clean(arg)
+ acls, stat, err := fs.Conn.GetACL(cmd.Context(), zkPath)
+ if stat == nil {
+ err = fmt.Errorf("no such node")
+ }
+ if err != nil {
+ hasError = true
+ if !statArgs.Force || err != zk.ErrNoNode {
+ log.Warningf("stat: cannot access %v: %v", zkPath, err)
+ }
+ continue
+ }
+ fmt.Printf("Path: %s\n", zkPath)
+ fmt.Printf("Created: %s\n", zk2topo.Time(stat.Ctime).Format(zkfilepath.TimeFmtMicro))
+ fmt.Printf("Modified: %s\n", zk2topo.Time(stat.Mtime).Format(zkfilepath.TimeFmtMicro))
+ fmt.Printf("Size: %v\n", stat.DataLength)
+ fmt.Printf("Children: %v\n", stat.NumChildren)
+ fmt.Printf("Version: %v\n", stat.Version)
+ fmt.Printf("Ephemeral: %v\n", stat.EphemeralOwner)
+ fmt.Printf("ACL:\n")
+ for _, acl := range acls {
+ fmt.Printf(" %v:%v %v\n", acl.Scheme, acl.ID, zkfs.FormatACL(acl))
+ }
+ }
+ if hasError {
+ return fmt.Errorf("stat: some paths had errors")
+ }
+ return nil
+}
+
+func init() {
+ Stat.Flags().BoolVarP(&statArgs.Force, "force", "f", false, "no warning on nonexistent node")
+
+ Root.AddCommand(Stat)
+}
diff --git a/go/cmd/zk/command/touch.go b/go/cmd/zk/command/touch.go
new file mode 100644
index 00000000000..76c390cf169
--- /dev/null
+++ b/go/cmd/zk/command/touch.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+ "github.com/z-division/go-zookeeper/zk"
+
+ "vitess.io/vitess/go/cmd/zk/internal/zkfilepath"
+ "vitess.io/vitess/go/vt/topo/zk2topo"
+)
+
+var (
+ touchArgs = struct {
+ CreateParents bool
+ TouchOnly bool
+ }{}
+
+ Touch = &cobra.Command{
+ Use:   "touch <path>",
+ Short: "Change node access time.",
+ Long: `Change node access time.
+
+NOTE: There is no mkdir - just touch a node.
+The distinction between file and directory is not relevant in zookeeper.
+ Example: `zk touch /zk/path
+
+# Don't create, just touch timestamp.
+zk touch -c /zk/path
+
+# Create all parts necessary (think mkdir -p).
+zk touch -p /zk/path`,
+ Args: cobra.ExactArgs(1),
+ RunE: commandTouch,
+ }
+)
+
+func commandTouch(cmd *cobra.Command, args []string) error {
+ zkPath := zkfilepath.Clean(cmd.Flags().Arg(0))
+ var (
+ version int32 = -1
+ create = false
+ )
+
+ data, stat, err := fs.Conn.Get(cmd.Context(), zkPath)
+ switch {
+ case err == nil:
+ version = stat.Version
+ case err == zk.ErrNoNode:
+ create = true
+ default:
+ return fmt.Errorf("touch: cannot access %v: %v", zkPath, err)
+ }
+
+ switch {
+ case !create:
+ _, err = fs.Conn.Set(cmd.Context(), zkPath, data, version)
+ case touchArgs.TouchOnly:
+ return fmt.Errorf("touch: no such path %v", zkPath)
+ case touchArgs.CreateParents:
+ _, err = zk2topo.CreateRecursive(cmd.Context(), fs.Conn, zkPath, data, 0, zk.WorldACL(zk.PermAll), 10)
+ default:
+ _, err = fs.Conn.Create(cmd.Context(), zkPath, data, 0, zk.WorldACL(zk.PermAll))
+ }
+
+ if err != nil {
+ return fmt.Errorf("touch: cannot modify %v: %v", zkPath, err)
+ }
+ return nil
+}
+
+func init() {
+ Touch.Flags().BoolVarP(&touchArgs.CreateParents, "createparent", "p", false, "create parents")
+ Touch.Flags().BoolVarP(&touchArgs.TouchOnly, "touchonly", "c", false, "touch only - don't create")
+
+ Root.AddCommand(Touch)
+}
diff --git a/go/cmd/zk/command/unzip.go b/go/cmd/zk/command/unzip.go
new file mode 100644
index 00000000000..f4c800e0533
--- /dev/null
+++ b/go/cmd/zk/command/unzip.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "archive/zip"
+ "fmt"
+ "io"
+ "path"
+ "strings"
+
+ "github.com/spf13/cobra"
+ "github.com/z-division/go-zookeeper/zk"
+
+ "vitess.io/vitess/go/vt/topo/zk2topo"
+)
+
+var Unzip = &cobra.Command{
+ Use: "unzip <archive> <path>",
+ Example: `zk unzip zktree.zip /
+zk unzip zktree.zip /zk/prefix`,
+ Args: cobra.ExactArgs(2),
+ RunE: commandUnzip,
+}
+
+func commandUnzip(cmd *cobra.Command, args []string) error {
+ srcPath, dstPath := cmd.Flags().Arg(0), cmd.Flags().Arg(1)
+
+ if !strings.HasSuffix(srcPath, ".zip") {
+ return fmt.Errorf("zip: need to specify src .zip path: %v", srcPath)
+ }
+
+ zipReader, err := zip.OpenReader(srcPath)
+ if err != nil {
+ return fmt.Errorf("zip: error %v", err)
+ }
+ defer zipReader.Close()
+
+ for _, zf := range zipReader.File {
+ rc, err := zf.Open()
+ if err != nil {
+ return fmt.Errorf("unzip: error %v", err)
+ }
+ data, err := io.ReadAll(rc)
+ if err != nil {
+ return fmt.Errorf("unzip: failed reading archive: %v", err)
+ }
+ zkPath := zf.Name
+ if dstPath != "/" {
+ zkPath = path.Join(dstPath, zkPath)
+ }
+ _, err = zk2topo.CreateRecursive(cmd.Context(), fs.Conn, zkPath, data, 0, zk.WorldACL(zk.PermAll), 10)
+ if err != nil && err != zk.ErrNodeExists {
+ return fmt.Errorf("unzip: zk create failed: %v", err)
+ }
+ _, err = fs.Conn.Set(cmd.Context(), zkPath, data, -1)
+ if err != nil {
+ return fmt.Errorf("unzip: zk set failed: %v", err)
+ }
+ rc.Close()
+ }
+ return nil
+}
+
+func init() {
+ Root.AddCommand(Unzip)
+}
diff --git a/go/cmd/zk/command/wait.go b/go/cmd/zk/command/wait.go
new file mode 100644
index 00000000000..864f6e83626
--- /dev/null
+++ b/go/cmd/zk/command/wait.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+ "github.com/z-division/go-zookeeper/zk"
+
+ "vitess.io/vitess/go/cmd/zk/internal/zkfilepath"
+)
+
+var (
+ waitArgs = struct {
+ ExitIfExists bool
+ }{}
+
+ Wait = &cobra.Command{
+		Use:   "wait <path>",
+ Short: "Sets a watch on the node and then waits for an event to fire.",
+ Example: ` # Wait for node change or creation.
+zk wait /zk/path
+
+# Trailing slash waits on children.
+zk wait /zk/path/children/`,
+ Args: cobra.ExactArgs(1),
+ RunE: commandWait,
+ }
+)
+
+func commandWait(cmd *cobra.Command, args []string) error {
+ zkPath := cmd.Flags().Arg(0)
+ isDir := zkPath[len(zkPath)-1] == '/'
+ zkPath = zkfilepath.Clean(zkPath)
+
+ var wait <-chan zk.Event
+ var err error
+ if isDir {
+ _, _, wait, err = fs.Conn.ChildrenW(cmd.Context(), zkPath)
+ } else {
+ _, _, wait, err = fs.Conn.GetW(cmd.Context(), zkPath)
+ }
+ if err != nil {
+ if err == zk.ErrNoNode {
+ _, _, wait, _ = fs.Conn.ExistsW(cmd.Context(), zkPath)
+ } else {
+ return fmt.Errorf("wait: error %v: %v", zkPath, err)
+ }
+ } else {
+ if waitArgs.ExitIfExists {
+ return fmt.Errorf("already exists: %v", zkPath)
+ }
+ }
+ event := <-wait
+ fmt.Printf("event: %v\n", event)
+ return nil
+}
+
+func init() {
+ Wait.Flags().BoolVarP(&waitArgs.ExitIfExists, "exit", "e", false, "exit if the path already exists")
+
+ Root.AddCommand(Wait)
+}
diff --git a/go/cmd/zk/command/watch.go b/go/cmd/zk/command/watch.go
new file mode 100644
index 00000000000..eb28cc29ca2
--- /dev/null
+++ b/go/cmd/zk/command/watch.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "fmt"
+
+ "github.com/spf13/cobra"
+ "github.com/z-division/go-zookeeper/zk"
+
+ "vitess.io/vitess/go/cmd/zk/internal/zkfilepath"
+ "vitess.io/vitess/go/vt/log"
+)
+
+var Watch = &cobra.Command{
+	Use:     "watch <path> [<path> ...]",
+ Short: "Watches for changes to nodes and prints events as they occur.",
+ Example: `watch /zk/path`,
+ Args: cobra.MinimumNArgs(1),
+ RunE: commandWatch,
+}
+
+func commandWatch(cmd *cobra.Command, args []string) error {
+ eventChan := make(chan zk.Event, 16)
+ for _, arg := range cmd.Flags().Args() {
+ zkPath := zkfilepath.Clean(arg)
+ _, _, watch, err := fs.Conn.GetW(cmd.Context(), zkPath)
+ if err != nil {
+ return fmt.Errorf("watch error: %v", err)
+ }
+ go func() {
+ eventChan <- <-watch
+ }()
+ }
+
+ for {
+ select {
+ case <-cmd.Context().Done():
+ return nil
+ case event := <-eventChan:
+ log.Infof("watch: event %v: %v", event.Path, event)
+ if event.Type == zk.EventNodeDataChanged {
+ data, stat, watch, err := fs.Conn.GetW(cmd.Context(), event.Path)
+ if err != nil {
+ return fmt.Errorf("ERROR: failed to watch %v", err)
+ }
+ log.Infof("watch: %v %v\n", event.Path, stat)
+ println(data)
+ go func() {
+ eventChan <- <-watch
+ }()
+ } else if event.State == zk.StateDisconnected {
+ return nil
+ } else if event.Type == zk.EventNodeDeleted {
+ log.Infof("watch: %v deleted\n", event.Path)
+ } else {
+				// Most likely a session event - try to re-establish the watch.
+ _, _, watch, err := fs.Conn.GetW(cmd.Context(), event.Path)
+ if err != nil {
+ return fmt.Errorf("ERROR: failed to watch %v", err)
+ }
+ go func() {
+ eventChan <- <-watch
+ }()
+ }
+ }
+ }
+}
+
+func init() {
+ Root.AddCommand(Watch)
+}
diff --git a/go/cmd/zk/command/zip.go b/go/cmd/zk/command/zip.go
new file mode 100644
index 00000000000..b765f5bb00e
--- /dev/null
+++ b/go/cmd/zk/command/zip.go
@@ -0,0 +1,116 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "archive/zip"
+ "fmt"
+ "os"
+ "path"
+ "strings"
+ "sync"
+
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/zk/internal/zkfilepath"
+ "vitess.io/vitess/go/cmd/zk/internal/zkfs"
+ "vitess.io/vitess/go/vt/topo/zk2topo"
+)
+
+var Zip = &cobra.Command{
+	Use:   "zip <zk path> [<zk path> ...] <archive>",
+ Short: "Store a zk tree in a zip archive.",
+ Long: `Store a zk tree in a zip archive.
+
+Note this won't be immediately useful to the local filesystem since znodes can have data and children;
+that is, even "directories" can contain data.`,
+ Args: cobra.MinimumNArgs(2),
+ RunE: commandZip,
+}
+
+func commandZip(cmd *cobra.Command, args []string) error {
+ posargs := cmd.Flags().Args()
+ dstPath := posargs[len(posargs)-1]
+ paths := posargs[:len(posargs)-1]
+ if !strings.HasSuffix(dstPath, ".zip") {
+ return fmt.Errorf("zip: need to specify destination .zip path: %v", dstPath)
+ }
+ zipFile, err := os.Create(dstPath)
+ if err != nil {
+ return fmt.Errorf("zip: error %v", err)
+ }
+
+ wg := sync.WaitGroup{}
+ items := make(chan *zkfs.Item, 64)
+ for _, arg := range paths {
+ zkPath := zkfilepath.Clean(arg)
+ children, err := zk2topo.ChildrenRecursive(cmd.Context(), fs.Conn, zkPath)
+ if err != nil {
+ return fmt.Errorf("zip: error %v", err)
+ }
+ for _, child := range children {
+ toAdd := path.Join(zkPath, child)
+ wg.Add(1)
+ go func() {
+ data, stat, err := fs.Conn.Get(cmd.Context(), toAdd)
+ items <- &zkfs.Item{
+ Path: toAdd,
+ Data: data,
+ Stat: stat,
+ Err: err,
+ }
+ wg.Done()
+ }()
+ }
+ }
+ go func() {
+ wg.Wait()
+ close(items)
+ }()
+
+ zipWriter := zip.NewWriter(zipFile)
+ for item := range items {
+ path, data, stat, err := item.Path, item.Data, item.Stat, item.Err
+ if err != nil {
+ return fmt.Errorf("zip: get failed: %v", err)
+ }
+ // Skip ephemerals - not sure why you would archive them.
+ if stat.EphemeralOwner > 0 {
+ continue
+ }
+ fi := &zip.FileHeader{Name: path, Method: zip.Deflate}
+ fi.Modified = zk2topo.Time(stat.Mtime)
+ f, err := zipWriter.CreateHeader(fi)
+ if err != nil {
+ return fmt.Errorf("zip: create failed: %v", err)
+ }
+ _, err = f.Write(data)
+ if err != nil {
+ return fmt.Errorf("zip: create failed: %v", err)
+ }
+ }
+ err = zipWriter.Close()
+ if err != nil {
+ return fmt.Errorf("zip: close failed: %v", err)
+ }
+ zipFile.Close()
+ return nil
+}
+
+func init() {
+ Root.AddCommand(Zip)
+}
diff --git a/go/cmd/zk/docgen/main.go b/go/cmd/zk/docgen/main.go
new file mode 100644
index 00000000000..b8a7bde3d14
--- /dev/null
+++ b/go/cmd/zk/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/zk/command"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+		Use: "docgen [-d <dir>]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(command.Root, dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/zk/internal/zkfilepath/zkfilepath.go b/go/cmd/zk/internal/zkfilepath/zkfilepath.go
new file mode 100644
index 00000000000..7febc7a9677
--- /dev/null
+++ b/go/cmd/zk/internal/zkfilepath/zkfilepath.go
@@ -0,0 +1,75 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package zkfilepath provides filepath utilities specialized to zookeeper.
+package zkfilepath
+
+import (
+ "fmt"
+ "path"
+ "strings"
+
+ "github.com/z-division/go-zookeeper/zk"
+
+ "vitess.io/vitess/go/vt/topo/zk2topo"
+)
+
+const (
+ TimeFmt = "2006-01-02 15:04:05"
+ TimeFmtMicro = "2006-01-02 15:04:05.000000"
+)
+
+// Clean returns the shortest path name of a zookeeper path after trimming
+// trailing slashes.
+func Clean(zkPath string) string {
+ if zkPath != "/" {
+ zkPath = strings.TrimSuffix(zkPath, "/")
+ }
+
+ return path.Clean(zkPath)
+}
+
+// Format returns a path formatted to a canonical string.
+func Format(stat *zk.Stat, zkPath string, showFullPath bool, longListing bool) string {
+ var name, perms string
+
+ if !showFullPath {
+ name = path.Base(zkPath)
+ } else {
+ name = zkPath
+ }
+
+ if longListing {
+ if stat.NumChildren > 0 {
+ // FIXME(msolomon) do permissions check?
+ perms = "drwxrwxrwx"
+ if stat.DataLength > 0 {
+ // give a visual indication that this node has data as well as children
+ perms = "nrw-rw-rw-"
+ }
+ } else if stat.EphemeralOwner != 0 {
+ perms = "erw-rw-rw-"
+ } else {
+ perms = "-rw-rw-rw-"
+ }
+ // always print the Local version of the time. zookeeper's
+ // go / C library would return a local time anyway, but
+ // might as well be sure.
+ return fmt.Sprintf("%v %v %v % 8v % 20v %v\n", perms, "zk", "zk", stat.DataLength, zk2topo.Time(stat.Mtime).Local().Format(TimeFmt), name)
+ } else {
+ return fmt.Sprintf("%v\n", name)
+ }
+}
diff --git a/go/cmd/zk/internal/zkfs/zkfs.go b/go/cmd/zk/internal/zkfs/zkfs.go
new file mode 100644
index 00000000000..9bab19ec1e4
--- /dev/null
+++ b/go/cmd/zk/internal/zkfs/zkfs.go
@@ -0,0 +1,174 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package zkfs provides utilities for working with zookeeper in a filesystem-like manner.
+package zkfs
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "path"
+ "strings"
+ "syscall"
+
+ "github.com/z-division/go-zookeeper/zk"
+
+ "vitess.io/vitess/go/cmd/zk/internal/zkfilepath"
+ "vitess.io/vitess/go/vt/topo/zk2topo"
+)
+
+// FS wraps a zk2topo connection to provide FS utility methods.
+type FS struct {
+ Conn *zk2topo.ZkConn
+}
+
+// CopyContext copies the contents of src to dst.
+func (fs *FS) CopyContext(ctx context.Context, src, dst string) error {
+ dstIsDir := dst[len(dst)-1] == '/'
+ src = zkfilepath.Clean(src)
+ dst = zkfilepath.Clean(dst)
+
+ if !IsFile(src) && !IsFile(dst) {
+ return fmt.Errorf("cp: neither src nor dst is a /zk file")
+ }
+
+ data, err := fs.ReadContext(ctx, src)
+ if err != nil {
+ return fmt.Errorf("cp: cannot read %v: %v", src, err)
+ }
+
+ // If we are copying to a local directory - say '.', make the filename
+ // the same as the source.
+ if !IsFile(dst) {
+ fileInfo, err := os.Stat(dst)
+ if err != nil {
+ if err.(*os.PathError).Err != syscall.ENOENT {
+ return fmt.Errorf("cp: cannot stat %v: %v", dst, err)
+ }
+ } else if fileInfo.IsDir() {
+ dst = path.Join(dst, path.Base(src))
+ }
+ } else if dstIsDir {
+ // If we are copying into zk, interpret trailing slash as treating the
+ // dst as a directory.
+ dst = path.Join(dst, path.Base(src))
+ }
+ if err := fs.WriteContext(ctx, dst, data); err != nil {
+ return fmt.Errorf("cp: cannot write %v: %v", dst, err)
+ }
+ return nil
+}
+
+// MultiCopyContext copies the contents of multiple sources to a single dst directory.
+func (fs *FS) MultiCopyContext(ctx context.Context, args []string) error {
+ dstPath := args[len(args)-1]
+ if dstPath[len(dstPath)-1] != '/' {
+ // In multifile context, dstPath must be a directory.
+ dstPath += "/"
+ }
+
+ for _, srcPath := range args[:len(args)-1] {
+ if err := fs.CopyContext(ctx, srcPath, dstPath); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// ReadContext reads the data stored at path.
+func (fs *FS) ReadContext(ctx context.Context, path string) (data []byte, err error) {
+ if !IsFile(path) {
+ data, _, err = fs.Conn.Get(ctx, path)
+ return data, err
+ }
+
+ file, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+
+ data, err = io.ReadAll(file)
+ return data, err
+}
+
+// WriteContext writes the given data to path.
+func (fs *FS) WriteContext(ctx context.Context, path string, data []byte) (err error) {
+ if IsFile(path) {
+ _, err = fs.Conn.Set(ctx, path, data, -1)
+ if err == zk.ErrNoNode {
+ _, err = zk2topo.CreateRecursive(ctx, fs.Conn, path, data, 0, zk.WorldACL(zk.PermAll), 10)
+ }
+ return err
+ }
+ return os.WriteFile(path, []byte(data), 0666)
+}
+
+var (
+ charPermMap map[string]int32
+ permCharMap map[int32]string
+)
+
+func init() {
+ charPermMap = map[string]int32{
+ "r": zk.PermRead,
+ "w": zk.PermWrite,
+ "d": zk.PermDelete,
+ "c": zk.PermCreate,
+ "a": zk.PermAdmin,
+ }
+ permCharMap = make(map[int32]string)
+ for c, p := range charPermMap {
+ permCharMap[p] = c
+ }
+}
+
+// FormatACL returns a string representation of a zookeeper ACL permission.
+func FormatACL(acl zk.ACL) string {
+ s := ""
+
+ for _, perm := range []int32{zk.PermRead, zk.PermWrite, zk.PermDelete, zk.PermCreate, zk.PermAdmin} {
+ if acl.Perms&perm != 0 {
+ s += permCharMap[perm]
+ } else {
+ s += "-"
+ }
+ }
+ return s
+}
+
+// IsFile returns true if the path is a zk type of file.
+func IsFile(path string) bool {
+ return strings.HasPrefix(path, "/zk")
+}
+
+// ParsePermMode parses the mode string as a perm mask.
+func ParsePermMode(mode string) (mask int32) {
+ for _, c := range mode[2:] {
+ mask |= charPermMap[string(c)]
+ }
+
+ return mask
+}
+
+// Item represents an item in a zookeeper filesystem.
+type Item struct {
+ Path string
+ Data []byte
+ Stat *zk.Stat
+ Err error
+}
diff --git a/go/cmd/zk/zkcmd.go b/go/cmd/zk/zkcmd.go
index 8d456f6b081..f03ac41c6ef 100644
--- a/go/cmd/zk/zkcmd.go
+++ b/go/cmd/zk/zkcmd.go
@@ -17,156 +17,17 @@ limitations under the License.
package main
import (
- "archive/zip"
- "bytes"
"context"
- "fmt"
- "io"
"os"
- "os/exec"
"os/signal"
- "path"
- "sort"
- "strings"
- "sync"
- "syscall"
- "time"
- "github.com/spf13/pflag"
- "github.com/z-division/go-zookeeper/zk"
- "golang.org/x/term"
-
- "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/cmd/zk/command"
"vitess.io/vitess/go/exit"
"vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/logutil"
- "vitess.io/vitess/go/vt/topo"
- "vitess.io/vitess/go/vt/topo/zk2topo"
-)
-
-var doc = `
-zk is a tool for wrangling the zookeeper
-
-It tries to mimic unix file system commands wherever possible, but
-there are some slight differences in flag handling.
-
-zk -h - provide help on overriding cell selection
-
-zk addAuth digest user:pass
-
-zk cat /zk/path
-zk cat -l /zk/path1 /zk/path2 (list filename before file data)
-
-zk chmod n-mode /zk/path
-zk chmod n+mode /zk/path
-
-zk cp /zk/path .
-zk cp ./config /zk/path/config
-zk cp ./config /zk/path/ (trailing slash indicates directory)
-
-zk edit /zk/path (create a local copy, edit and write changes back to cell)
-
-zk ls /zk
-zk ls -l /zk
-zk ls -ld /zk (list directory node itself)
-zk ls -R /zk (recursive, expensive)
-
-zk stat /zk/path
-
-zk touch /zk/path
-zk touch -c /zk/path (don't create, just touch timestamp)
-zk touch -p /zk/path (create all parts necessary, think mkdir -p)
-NOTE: there is no mkdir - just touch a node. The distinction
-between file and directory is just not relevant in zookeeper.
-
-zk rm /zk/path
-zk rm -r /zk/path (recursive)
-zk rm -f /zk/path (no error on nonexistent node)
-
-zk wait /zk/path (wait for node change or creation)
-zk wait /zk/path/children/ (trailing slash waits on children)
-
-zk watch /zk/path (print changes)
-
-zk unzip zktree.zip /
-zk unzip zktree.zip /zk/prefix
-
-zk zip /zk/root zktree.zip
-NOTE: zip file can't be dumped to the file system since znodes
-can have data and children.
-
-The zk tool looks for the address of the cluster in /etc/zookeeper/zk_client.conf,
-or the file specified in the ZK_CLIENT_CONFIG environment variable.
-
-The local cell may be overridden with the ZK_CLIENT_LOCAL_CELL environment
-variable.
-`
-
-const (
- timeFmt = "2006-01-02 15:04:05"
- timeFmtMicro = "2006-01-02 15:04:05.000000"
)
-type cmdFunc func(ctx context.Context, subFlags *pflag.FlagSet, args []string) error
-
-var cmdMap map[string]cmdFunc
-var zconn *zk2topo.ZkConn
-var server string
-
-func init() {
- cmdMap = map[string]cmdFunc{
- "addAuth": cmdAddAuth,
- "cat": cmdCat,
- "chmod": cmdChmod,
- "cp": cmdCp,
- "edit": cmdEdit,
- "ls": cmdLs,
- "rm": cmdRm,
- "stat": cmdStat,
- "touch": cmdTouch,
- "unzip": cmdUnzip,
- "wait": cmdWait,
- "watch": cmdWatch,
- "zip": cmdZip,
- }
-}
-
func main() {
defer exit.Recover()
- defer logutil.Flush()
- pflag.StringVar(&server, "server", server, "server(s) to connect to")
- // handling case of --help & -h
- var help bool
- pflag.BoolVarP(&help, "help", "h", false, "display usage and exit")
- log.RegisterFlags(pflag.CommandLine)
- logutil.RegisterFlags(pflag.CommandLine)
- acl.RegisterFlags(pflag.CommandLine)
- pflag.CommandLine.Usage = func() {
- fmt.Fprint(os.Stderr, doc)
- pflag.Usage()
- }
-
- pflag.Parse()
- logutil.PurgeLogs()
-
- if help || pflag.Arg(0) == "help" {
- pflag.Usage()
- os.Exit(0)
- }
-
- // if no zk command is provided after --server then we need to print doc & usage both
- args := pflag.Args()
- if len(args) == 0 {
- pflag.CommandLine.Usage()
- exit.Return(1)
- }
- cmdName := args[0]
- args = args[1:]
- cmd, ok := cmdMap[cmdName]
- if !ok {
- log.Exitf("Unknown command %v", cmdName)
- }
- subFlags := pflag.NewFlagSet(cmdName, pflag.ContinueOnError)
// Create a context for the command, cancel it if we get a signal.
ctx, cancel := context.WithCancel(context.Background())
@@ -177,848 +38,9 @@ func main() {
cancel()
}()
- // Connect to the server.
- zconn = zk2topo.Connect(server)
-
// Run the command.
- if err := cmd(ctx, subFlags, args); err != nil {
+ if err := command.Root.ExecuteContext(ctx); err != nil {
log.Error(err)
exit.Return(1)
}
}
-
-func fixZkPath(zkPath string) string {
- if zkPath != "/" {
- zkPath = strings.TrimSuffix(zkPath, "/")
- }
- return path.Clean(zkPath)
-}
-
-func isZkFile(path string) bool {
- return strings.HasPrefix(path, "/zk")
-}
-
-func cmdWait(ctx context.Context, subFlags *pflag.FlagSet, args []string) error {
- var exitIfExists bool
- subFlags.BoolVarP(&exitIfExists, "exit", "e", false, "exit if the path already exists")
-
- if err := subFlags.Parse(args); err != nil {
- return err
- }
-
- if subFlags.NArg() != 1 {
- return fmt.Errorf("wait: can only wait for one path")
- }
- zkPath := subFlags.Arg(0)
- isDir := zkPath[len(zkPath)-1] == '/'
- zkPath = fixZkPath(zkPath)
-
- var wait <-chan zk.Event
- var err error
- if isDir {
- _, _, wait, err = zconn.ChildrenW(ctx, zkPath)
- } else {
- _, _, wait, err = zconn.GetW(ctx, zkPath)
- }
- if err != nil {
- if err == zk.ErrNoNode {
- _, _, wait, _ = zconn.ExistsW(ctx, zkPath)
- } else {
- return fmt.Errorf("wait: error %v: %v", zkPath, err)
- }
- } else {
- if exitIfExists {
- return fmt.Errorf("already exists: %v", zkPath)
- }
- }
- event := <-wait
- fmt.Printf("event: %v\n", event)
- return nil
-}
-
-// Watch for changes to the node.
-func cmdWatch(ctx context.Context, subFlags *pflag.FlagSet, args []string) error {
- if err := subFlags.Parse(args); err != nil {
- return err
- }
-
- eventChan := make(chan zk.Event, 16)
- for _, arg := range subFlags.Args() {
- zkPath := fixZkPath(arg)
- _, _, watch, err := zconn.GetW(ctx, zkPath)
- if err != nil {
- return fmt.Errorf("watch error: %v", err)
- }
- go func() {
- eventChan <- <-watch
- }()
- }
-
- for {
- select {
- case <-ctx.Done():
- return nil
- case event := <-eventChan:
- log.Infof("watch: event %v: %v", event.Path, event)
- if event.Type == zk.EventNodeDataChanged {
- data, stat, watch, err := zconn.GetW(ctx, event.Path)
- if err != nil {
- return fmt.Errorf("ERROR: failed to watch %v", err)
- }
- log.Infof("watch: %v %v\n", event.Path, stat)
- println(data)
- go func() {
- eventChan <- <-watch
- }()
- } else if event.State == zk.StateDisconnected {
- return nil
- } else if event.Type == zk.EventNodeDeleted {
- log.Infof("watch: %v deleted\n", event.Path)
- } else {
- // Most likely a session event - try t
- _, _, watch, err := zconn.GetW(ctx, event.Path)
- if err != nil {
- return fmt.Errorf("ERROR: failed to watch %v", err)
- }
- go func() {
- eventChan <- <-watch
- }()
- }
- }
- }
-}
-
-func cmdLs(ctx context.Context, subFlags *pflag.FlagSet, args []string) error {
- var (
- longListing bool
- directoryListing bool
- force bool
- recursiveListing bool
- )
- subFlags.BoolVarP(&longListing, "longlisting", "l", false, "long listing")
- subFlags.BoolVarP(&directoryListing, "directorylisting", "d", false, "list directory instead of contents")
- subFlags.BoolVarP(&force, "force", "f", false, "no warning on nonexistent node")
- subFlags.BoolVarP(&recursiveListing, "recursivelisting", "R", false, "recursive listing")
-
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() == 0 {
- return fmt.Errorf("ls: no path specified")
- }
- resolved, err := zk2topo.ResolveWildcards(ctx, zconn, subFlags.Args())
- if err != nil {
- return fmt.Errorf("ls: invalid wildcards: %v", err)
- }
- if len(resolved) == 0 {
- // the wildcards didn't result in anything, we're
- // done.
- return nil
- }
-
- hasError := false
- needsHeader := len(resolved) > 1 && !directoryListing
- for _, arg := range resolved {
- zkPath := fixZkPath(arg)
- var children []string
- var err error
- isDir := true
- if directoryListing {
- children = []string{""}
- isDir = false
- } else if recursiveListing {
- children, err = zk2topo.ChildrenRecursive(ctx, zconn, zkPath)
- } else {
- children, _, err = zconn.Children(ctx, zkPath)
- // Assume this is a file node if it has no children.
- if len(children) == 0 {
- children = []string{""}
- isDir = false
- }
- }
- if err != nil {
- hasError = true
- if !force || err != zk.ErrNoNode {
- log.Warningf("ls: cannot access %v: %v", zkPath, err)
- }
- }
-
- // Show the full path when it helps.
- showFullPath := false
- if recursiveListing {
- showFullPath = true
- } else if longListing && (directoryListing || !isDir) {
- showFullPath = true
- }
- if needsHeader {
- fmt.Printf("%v:\n", zkPath)
- }
- if len(children) > 0 {
- if longListing && isDir {
- fmt.Printf("total: %v\n", len(children))
- }
- sort.Strings(children)
- stats := make([]*zk.Stat, len(children))
- wg := sync.WaitGroup{}
- f := func(i int) {
- localPath := path.Join(zkPath, children[i])
- _, stat, err := zconn.Exists(ctx, localPath)
- if err != nil {
- if !force || err != zk.ErrNoNode {
- log.Warningf("ls: cannot access: %v: %v", localPath, err)
- }
- } else {
- stats[i] = stat
- }
- wg.Done()
- }
- for i := range children {
- wg.Add(1)
- go f(i)
- }
- wg.Wait()
-
- for i, child := range children {
- localPath := path.Join(zkPath, child)
- if stat := stats[i]; stat != nil {
- fmtPath(stat, localPath, showFullPath, longListing)
- }
- }
- }
- if needsHeader {
- fmt.Println()
- }
- }
- if hasError {
- return fmt.Errorf("ls: some paths had errors")
- }
- return nil
-}
-
-func fmtPath(stat *zk.Stat, zkPath string, showFullPath bool, longListing bool) {
- var name, perms string
-
- if !showFullPath {
- name = path.Base(zkPath)
- } else {
- name = zkPath
- }
-
- if longListing {
- if stat.NumChildren > 0 {
- // FIXME(msolomon) do permissions check?
- perms = "drwxrwxrwx"
- if stat.DataLength > 0 {
- // give a visual indication that this node has data as well as children
- perms = "nrw-rw-rw-"
- }
- } else if stat.EphemeralOwner != 0 {
- perms = "erw-rw-rw-"
- } else {
- perms = "-rw-rw-rw-"
- }
- // always print the Local version of the time. zookeeper's
- // go / C library would return a local time anyway, but
- // might as well be sure.
- fmt.Printf("%v %v %v % 8v % 20v %v\n", perms, "zk", "zk", stat.DataLength, zk2topo.Time(stat.Mtime).Local().Format(timeFmt), name)
- } else {
- fmt.Printf("%v\n", name)
- }
-}
-
-func cmdTouch(ctx context.Context, subFlags *pflag.FlagSet, args []string) error {
- var (
- createParents bool
- touchOnly bool
- )
-
- subFlags.BoolVarP(&createParents, "createparent", "p", false, "create parents")
- subFlags.BoolVarP(&touchOnly, "touchonly", "c", false, "touch only - don't create")
-
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() != 1 {
- return fmt.Errorf("touch: need to specify exactly one path")
- }
-
- zkPath := fixZkPath(subFlags.Arg(0))
-
- var (
- version int32 = -1
- create = false
- )
-
- data, stat, err := zconn.Get(ctx, zkPath)
- switch {
- case err == nil:
- version = stat.Version
- case err == zk.ErrNoNode:
- create = true
- default:
- return fmt.Errorf("touch: cannot access %v: %v", zkPath, err)
- }
-
- switch {
- case !create:
- _, err = zconn.Set(ctx, zkPath, data, version)
- case touchOnly:
- return fmt.Errorf("touch: no such path %v", zkPath)
- case createParents:
- _, err = zk2topo.CreateRecursive(ctx, zconn, zkPath, data, 0, zk.WorldACL(zk.PermAll), 10)
- default:
- _, err = zconn.Create(ctx, zkPath, data, 0, zk.WorldACL(zk.PermAll))
- }
-
- if err != nil {
- return fmt.Errorf("touch: cannot modify %v: %v", zkPath, err)
- }
- return nil
-}
-
-func cmdRm(ctx context.Context, subFlags *pflag.FlagSet, args []string) error {
- var (
- force bool
- recursiveDelete bool
- )
- subFlags.BoolVarP(&force, "force", "f", false, "no warning on nonexistent node")
- subFlags.BoolVarP(&recursiveDelete, "recursivedelete", "r", false, "recursive delete")
-
- if err := subFlags.Parse(args); err != nil {
- return err
- }
-
- if subFlags.NArg() == 0 {
- return fmt.Errorf("rm: no path specified")
- }
-
- if recursiveDelete {
- for _, arg := range subFlags.Args() {
- zkPath := fixZkPath(arg)
- if strings.Count(zkPath, "/") < 2 {
- return fmt.Errorf("rm: overly general path: %v", zkPath)
- }
- }
- }
-
- resolved, err := zk2topo.ResolveWildcards(ctx, zconn, subFlags.Args())
- if err != nil {
- return fmt.Errorf("rm: invalid wildcards: %v", err)
- }
- if len(resolved) == 0 {
- // the wildcards didn't result in anything, we're done
- return nil
- }
-
- hasError := false
- for _, arg := range resolved {
- zkPath := fixZkPath(arg)
- var err error
- if recursiveDelete {
- err = zk2topo.DeleteRecursive(ctx, zconn, zkPath, -1)
- } else {
- err = zconn.Delete(ctx, zkPath, -1)
- }
- if err != nil && (!force || err != zk.ErrNoNode) {
- hasError = true
- log.Warningf("rm: cannot delete %v: %v", zkPath, err)
- }
- }
- if hasError {
- // to be consistent with the command line 'rm -f', return
- // 0 if using 'zk rm -f' and the file doesn't exist.
- return fmt.Errorf("rm: some paths had errors")
- }
- return nil
-}
-
-func cmdAddAuth(ctx context.Context, subFlags *pflag.FlagSet, args []string) error {
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() < 2 {
- return fmt.Errorf("addAuth: expected args ")
- }
- scheme, auth := subFlags.Arg(0), subFlags.Arg(1)
- return zconn.AddAuth(ctx, scheme, []byte(auth))
-}
-
-func cmdCat(ctx context.Context, subFlags *pflag.FlagSet, args []string) error {
- var (
- longListing bool
- force bool
- decodeProto bool
- )
- subFlags.BoolVarP(&longListing, "longListing", "l", false, "long listing")
- subFlags.BoolVarP(&force, "force", "f", false, "no warning on nonexistent node")
- subFlags.BoolVarP(&decodeProto, "decodeProto", "p", false, "decode proto files and display them as text")
-
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() == 0 {
- return fmt.Errorf("cat: no path specified")
- }
- resolved, err := zk2topo.ResolveWildcards(ctx, zconn, subFlags.Args())
- if err != nil {
- return fmt.Errorf("cat: invalid wildcards: %v", err)
- }
- if len(resolved) == 0 {
- // the wildcards didn't result in anything, we're done
- return nil
- }
-
- hasError := false
- for _, arg := range resolved {
- zkPath := fixZkPath(arg)
- data, _, err := zconn.Get(ctx, zkPath)
- if err != nil {
- hasError = true
- if !force || err != zk.ErrNoNode {
- log.Warningf("cat: cannot access %v: %v", zkPath, err)
- }
- continue
- }
-
- if longListing {
- fmt.Printf("%v:\n", zkPath)
- }
- decoded := ""
- if decodeProto {
- decoded, err = topo.DecodeContent(zkPath, data, false)
- if err != nil {
- log.Warningf("cat: cannot proto decode %v: %v", zkPath, err)
- decoded = string(data)
- }
- } else {
- decoded = string(data)
- }
- fmt.Print(decoded)
- if len(decoded) > 0 && decoded[len(decoded)-1] != '\n' && (term.IsTerminal(int(os.Stdout.Fd())) || longListing) {
- fmt.Print("\n")
- }
- }
- if hasError {
- return fmt.Errorf("cat: some paths had errors")
- }
- return nil
-}
-
-func cmdEdit(ctx context.Context, subFlags *pflag.FlagSet, args []string) error {
- var force bool
- subFlags.BoolVarP(&force, "force", "f", false, "no warning on nonexistent node")
-
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() == 0 {
- return fmt.Errorf("edit: no path specified")
- }
- arg := subFlags.Arg(0)
- zkPath := fixZkPath(arg)
- data, stat, err := zconn.Get(ctx, zkPath)
- if err != nil {
- if !force || err != zk.ErrNoNode {
- log.Warningf("edit: cannot access %v: %v", zkPath, err)
- }
- return fmt.Errorf("edit: cannot access %v: %v", zkPath, err)
- }
-
- name := path.Base(zkPath)
- tmpPath := fmt.Sprintf("/tmp/zk-edit-%v-%v", name, time.Now().UnixNano())
- f, err := os.Create(tmpPath)
- if err == nil {
- _, err = f.Write(data)
- f.Close()
- }
- if err != nil {
- return fmt.Errorf("edit: cannot write file %v", err)
- }
-
- cmd := exec.Command(os.Getenv("EDITOR"), tmpPath)
- cmd.Stdin = os.Stdin
- cmd.Stdout = os.Stdout
- cmd.Stderr = os.Stderr
- err = cmd.Run()
- if err != nil {
- os.Remove(tmpPath)
- return fmt.Errorf("edit: cannot start $EDITOR: %v", err)
- }
-
- fileData, err := os.ReadFile(tmpPath)
- if err != nil {
- os.Remove(tmpPath)
- return fmt.Errorf("edit: cannot read file %v", err)
- }
-
- if !bytes.Equal(fileData, data) {
- // data changed - update if we can
- _, err = zconn.Set(ctx, zkPath, fileData, stat.Version)
- if err != nil {
- os.Remove(tmpPath)
- return fmt.Errorf("edit: cannot write zk file %v", err)
- }
- }
- os.Remove(tmpPath)
- return nil
-}
-
-func cmdStat(ctx context.Context, subFlags *pflag.FlagSet, args []string) error {
- var force bool
- subFlags.BoolVarP(&force, "force", "f", false, "no warning on nonexistent node")
-
- if err := subFlags.Parse(args); err != nil {
- return err
- }
-
- if subFlags.NArg() == 0 {
- return fmt.Errorf("stat: no path specified")
- }
-
- resolved, err := zk2topo.ResolveWildcards(ctx, zconn, subFlags.Args())
- if err != nil {
- return fmt.Errorf("stat: invalid wildcards: %v", err)
- }
- if len(resolved) == 0 {
- // the wildcards didn't result in anything, we're done
- return nil
- }
-
- hasError := false
- for _, arg := range resolved {
- zkPath := fixZkPath(arg)
- acls, stat, err := zconn.GetACL(ctx, zkPath)
- if stat == nil {
- err = fmt.Errorf("no such node")
- }
- if err != nil {
- hasError = true
- if !force || err != zk.ErrNoNode {
- log.Warningf("stat: cannot access %v: %v", zkPath, err)
- }
- continue
- }
- fmt.Printf("Path: %s\n", zkPath)
- fmt.Printf("Created: %s\n", zk2topo.Time(stat.Ctime).Format(timeFmtMicro))
- fmt.Printf("Modified: %s\n", zk2topo.Time(stat.Mtime).Format(timeFmtMicro))
- fmt.Printf("Size: %v\n", stat.DataLength)
- fmt.Printf("Children: %v\n", stat.NumChildren)
- fmt.Printf("Version: %v\n", stat.Version)
- fmt.Printf("Ephemeral: %v\n", stat.EphemeralOwner)
- fmt.Printf("ACL:\n")
- for _, acl := range acls {
- fmt.Printf(" %v:%v %v\n", acl.Scheme, acl.ID, fmtACL(acl))
- }
- }
- if hasError {
- return fmt.Errorf("stat: some paths had errors")
- }
- return nil
-}
-
-var charPermMap map[string]int32
-var permCharMap map[int32]string
-
-func init() {
- charPermMap = map[string]int32{
- "r": zk.PermRead,
- "w": zk.PermWrite,
- "d": zk.PermDelete,
- "c": zk.PermCreate,
- "a": zk.PermAdmin,
- }
- permCharMap = make(map[int32]string)
- for c, p := range charPermMap {
- permCharMap[p] = c
- }
-}
-
-func fmtACL(acl zk.ACL) string {
- s := ""
-
- for _, perm := range []int32{zk.PermRead, zk.PermWrite, zk.PermDelete, zk.PermCreate, zk.PermAdmin} {
- if acl.Perms&perm != 0 {
- s += permCharMap[perm]
- } else {
- s += "-"
- }
- }
- return s
-}
-
-func cmdChmod(ctx context.Context, subFlags *pflag.FlagSet, args []string) error {
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() < 2 {
- return fmt.Errorf("chmod: no permission specified")
- }
- mode := subFlags.Arg(0)
- if mode[0] != 'n' {
- return fmt.Errorf("chmod: invalid mode")
- }
-
- addPerms := false
- if mode[1] == '+' {
- addPerms = true
- } else if mode[1] != '-' {
- return fmt.Errorf("chmod: invalid mode")
- }
-
- var permMask int32
- for _, c := range mode[2:] {
- permMask |= charPermMap[string(c)]
- }
-
- resolved, err := zk2topo.ResolveWildcards(ctx, zconn, subFlags.Args()[1:])
- if err != nil {
- return fmt.Errorf("chmod: invalid wildcards: %v", err)
- }
- if len(resolved) == 0 {
- // the wildcards didn't result in anything, we're done
- return nil
- }
-
- hasError := false
- for _, arg := range resolved {
- zkPath := fixZkPath(arg)
- aclv, _, err := zconn.GetACL(ctx, zkPath)
- if err != nil {
- hasError = true
- log.Warningf("chmod: cannot set access %v: %v", zkPath, err)
- continue
- }
- if addPerms {
- aclv[0].Perms |= permMask
- } else {
- aclv[0].Perms &= ^permMask
- }
- err = zconn.SetACL(ctx, zkPath, aclv, -1)
- if err != nil {
- hasError = true
- log.Warningf("chmod: cannot set access %v: %v", zkPath, err)
- continue
- }
- }
- if hasError {
- return fmt.Errorf("chmod: some paths had errors")
- }
- return nil
-}
-
-func cmdCp(ctx context.Context, subFlags *pflag.FlagSet, args []string) error {
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- switch {
- case subFlags.NArg() < 2:
- return fmt.Errorf("cp: need to specify source and destination paths")
- case subFlags.NArg() == 2:
- return fileCp(ctx, args[0], args[1])
- default:
- return multiFileCp(ctx, args)
- }
-}
-
-func getPathData(ctx context.Context, filePath string) ([]byte, error) {
- if isZkFile(filePath) {
- data, _, err := zconn.Get(ctx, filePath)
- return data, err
- }
- var err error
- file, err := os.Open(filePath)
- if err == nil {
- data, err := io.ReadAll(file)
- if err == nil {
- return data, err
- }
- }
- return nil, err
-}
-
-func setPathData(ctx context.Context, filePath string, data []byte) error {
- if isZkFile(filePath) {
- _, err := zconn.Set(ctx, filePath, data, -1)
- if err == zk.ErrNoNode {
- _, err = zk2topo.CreateRecursive(ctx, zconn, filePath, data, 0, zk.WorldACL(zk.PermAll), 10)
- }
- return err
- }
- return os.WriteFile(filePath, []byte(data), 0666)
-}
-
-func fileCp(ctx context.Context, srcPath, dstPath string) error {
- dstIsDir := dstPath[len(dstPath)-1] == '/'
- srcPath = fixZkPath(srcPath)
- dstPath = fixZkPath(dstPath)
-
- if !isZkFile(srcPath) && !isZkFile(dstPath) {
- return fmt.Errorf("cp: neither src nor dst is a /zk file: exitting")
- }
-
- data, err := getPathData(ctx, srcPath)
- if err != nil {
- return fmt.Errorf("cp: cannot read %v: %v", srcPath, err)
- }
-
- // If we are copying to a local directory - say '.', make the filename
- // the same as the source.
- if !isZkFile(dstPath) {
- fileInfo, err := os.Stat(dstPath)
- if err != nil {
- if err.(*os.PathError).Err != syscall.ENOENT {
- return fmt.Errorf("cp: cannot stat %v: %v", dstPath, err)
- }
- } else if fileInfo.IsDir() {
- dstPath = path.Join(dstPath, path.Base(srcPath))
- }
- } else if dstIsDir {
- // If we are copying into zk, interpret trailing slash as treating the
- // dstPath as a directory.
- dstPath = path.Join(dstPath, path.Base(srcPath))
- }
- if err := setPathData(ctx, dstPath, data); err != nil {
- return fmt.Errorf("cp: cannot write %v: %v", dstPath, err)
- }
- return nil
-}
-
-func multiFileCp(ctx context.Context, args []string) error {
- dstPath := args[len(args)-1]
- if dstPath[len(dstPath)-1] != '/' {
- // In multifile context, dstPath must be a directory.
- dstPath += "/"
- }
-
- for _, srcPath := range args[:len(args)-1] {
- if err := fileCp(ctx, srcPath, dstPath); err != nil {
- return err
- }
- }
- return nil
-}
-
-type zkItem struct {
- path string
- data []byte
- stat *zk.Stat
- err error
-}
-
-// Store a zk tree in a zip archive. This won't be immediately useful to
-// zip tools since even "directories" can contain data.
-func cmdZip(ctx context.Context, subFlags *pflag.FlagSet, args []string) error {
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() < 2 {
- return fmt.Errorf("zip: need to specify source and destination paths")
- }
-
- dstPath := subFlags.Arg(subFlags.NArg() - 1)
- paths := subFlags.Args()[:len(args)-1]
- if !strings.HasSuffix(dstPath, ".zip") {
- return fmt.Errorf("zip: need to specify destination .zip path: %v", dstPath)
- }
- zipFile, err := os.Create(dstPath)
- if err != nil {
- return fmt.Errorf("zip: error %v", err)
- }
-
- wg := sync.WaitGroup{}
- items := make(chan *zkItem, 64)
- for _, arg := range paths {
- zkPath := fixZkPath(arg)
- children, err := zk2topo.ChildrenRecursive(ctx, zconn, zkPath)
- if err != nil {
- return fmt.Errorf("zip: error %v", err)
- }
- for _, child := range children {
- toAdd := path.Join(zkPath, child)
- wg.Add(1)
- go func() {
- data, stat, err := zconn.Get(ctx, toAdd)
- items <- &zkItem{toAdd, data, stat, err}
- wg.Done()
- }()
- }
- }
- go func() {
- wg.Wait()
- close(items)
- }()
-
- zipWriter := zip.NewWriter(zipFile)
- for item := range items {
- path, data, stat, err := item.path, item.data, item.stat, item.err
- if err != nil {
- return fmt.Errorf("zip: get failed: %v", err)
- }
- // Skip ephemerals - not sure why you would archive them.
- if stat.EphemeralOwner > 0 {
- continue
- }
- fi := &zip.FileHeader{Name: path, Method: zip.Deflate}
- fi.Modified = zk2topo.Time(stat.Mtime)
- f, err := zipWriter.CreateHeader(fi)
- if err != nil {
- return fmt.Errorf("zip: create failed: %v", err)
- }
- _, err = f.Write(data)
- if err != nil {
- return fmt.Errorf("zip: create failed: %v", err)
- }
- }
- err = zipWriter.Close()
- if err != nil {
- return fmt.Errorf("zip: close failed: %v", err)
- }
- zipFile.Close()
- return nil
-}
-
-func cmdUnzip(ctx context.Context, subFlags *pflag.FlagSet, args []string) error {
- if err := subFlags.Parse(args); err != nil {
- return err
- }
- if subFlags.NArg() != 2 {
- return fmt.Errorf("zip: need to specify source and destination paths")
- }
-
- srcPath, dstPath := subFlags.Arg(0), subFlags.Arg(1)
-
- if !strings.HasSuffix(srcPath, ".zip") {
- return fmt.Errorf("zip: need to specify src .zip path: %v", srcPath)
- }
-
- zipReader, err := zip.OpenReader(srcPath)
- if err != nil {
- return fmt.Errorf("zip: error %v", err)
- }
- defer zipReader.Close()
-
- for _, zf := range zipReader.File {
- rc, err := zf.Open()
- if err != nil {
- return fmt.Errorf("unzip: error %v", err)
- }
- data, err := io.ReadAll(rc)
- if err != nil {
- return fmt.Errorf("unzip: failed reading archive: %v", err)
- }
- zkPath := zf.Name
- if dstPath != "/" {
- zkPath = path.Join(dstPath, zkPath)
- }
- _, err = zk2topo.CreateRecursive(ctx, zconn, zkPath, data, 0, zk.WorldACL(zk.PermAll), 10)
- if err != nil && err != zk.ErrNodeExists {
- return fmt.Errorf("unzip: zk create failed: %v", err)
- }
- _, err = zconn.Set(ctx, zkPath, data, -1)
- if err != nil {
- return fmt.Errorf("unzip: zk set failed: %v", err)
- }
- rc.Close()
- }
- return nil
-}
diff --git a/go/cmd/zkctl/command/init.go b/go/cmd/zkctl/command/init.go
new file mode 100644
index 00000000000..518b4a6239d
--- /dev/null
+++ b/go/cmd/zkctl/command/init.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import "github.com/spf13/cobra"
+
+var Init = &cobra.Command{
+ Use: "init",
+ Short: "Generates a new config and then starts zookeeper.",
+ Args: cobra.ExactArgs(0),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return zkd.Init()
+ },
+}
+
+func init() {
+ Root.AddCommand(Init)
+}
diff --git a/go/cmd/zkctl/command/root.go b/go/cmd/zkctl/command/root.go
new file mode 100644
index 00000000000..3399ed8c4cb
--- /dev/null
+++ b/go/cmd/zkctl/command/root.go
@@ -0,0 +1,63 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/zkctl"
+)
+
+var (
+ zkCfg = "6@:3801:3802:3803"
+ myID uint
+ zkExtra []string
+
+ zkd *zkctl.Zkd
+
+ Root = &cobra.Command{
+ Use: "zkctl",
+ Short: "Initializes and controls zookeeper with Vitess-specific configuration.",
+ PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
+ if err := servenv.CobraPreRunE(cmd, args); err != nil {
+ return err
+ }
+
+ zkConfig := zkctl.MakeZkConfigFromString(zkCfg, uint32(myID))
+ zkConfig.Extra = zkExtra
+ zkd = zkctl.NewZkd(zkConfig)
+
+ return nil
+ },
+ PersistentPostRun: func(cmd *cobra.Command, args []string) {
+ logutil.Flush()
+ },
+ }
+)
+
+func init() {
+ Root.PersistentFlags().StringVar(&zkCfg, "zk.cfg", zkCfg,
+ "zkid@server1:leaderPort1:electionPort1:clientPort1,...)")
+ Root.PersistentFlags().UintVar(&myID, "zk.myid", myID,
+ "which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname")
+ Root.PersistentFlags().StringArrayVar(&zkExtra, "zk.extra", zkExtra,
+ "extra config line(s) to append verbatim to config (flag can be specified more than once)")
+
+ servenv.MovePersistentFlagsToCobraCommand(Root)
+}
diff --git a/go/cmd/zkctl/command/shutdown.go b/go/cmd/zkctl/command/shutdown.go
new file mode 100644
index 00000000000..7237841e9c1
--- /dev/null
+++ b/go/cmd/zkctl/command/shutdown.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import "github.com/spf13/cobra"
+
+var Shutdown = &cobra.Command{
+ Use: "shutdown",
+ Short: "Terminates a zookeeper server but keeps its data dir intact.",
+ Args: cobra.ExactArgs(0),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return zkd.Shutdown()
+ },
+}
+
+func init() {
+ Root.AddCommand(Shutdown)
+}
diff --git a/go/cmd/zkctl/command/start.go b/go/cmd/zkctl/command/start.go
new file mode 100644
index 00000000000..1ed31d0ed54
--- /dev/null
+++ b/go/cmd/zkctl/command/start.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import "github.com/spf13/cobra"
+
+var Start = &cobra.Command{
+ Use: "start",
+ Short: "Runs an already initialized zookeeper server.",
+ Args: cobra.ExactArgs(0),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return zkd.Start()
+ },
+}
+
+func init() {
+ Root.AddCommand(Start)
+}
diff --git a/go/cmd/zkctl/command/teardown.go b/go/cmd/zkctl/command/teardown.go
new file mode 100644
index 00000000000..14fe7278835
--- /dev/null
+++ b/go/cmd/zkctl/command/teardown.go
@@ -0,0 +1,32 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package command
+
+import "github.com/spf13/cobra"
+
+var Teardown = &cobra.Command{
+ Use: "teardown",
+ Short: "Shuts down the zookeeper server and removes its data dir.",
+ Args: cobra.ExactArgs(0),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return zkd.Teardown()
+ },
+}
+
+func init() {
+ Root.AddCommand(Teardown)
+}
diff --git a/go/cmd/zkctl/docgen/main.go b/go/cmd/zkctl/docgen/main.go
new file mode 100644
index 00000000000..c35da8930e4
--- /dev/null
+++ b/go/cmd/zkctl/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/zkctl/command"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+ Use: "docgen [-d <dir>]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(command.Root, dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/zkctl/zkctl.go b/go/cmd/zkctl/zkctl.go
index 85ddb3e7e56..b00e3eb4812 100644
--- a/go/cmd/zkctl/zkctl.go
+++ b/go/cmd/zkctl/zkctl.go
@@ -14,71 +14,19 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// zkctl initializes and controls ZooKeeper with Vitess-specific configuration.
package main
import (
- "github.com/spf13/pflag"
-
+ "vitess.io/vitess/go/cmd/zkctl/command"
"vitess.io/vitess/go/exit"
"vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/logutil"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/zkctl"
)
-var usage = `
-Commands:
-
- init | start | shutdown | teardown
-`
-
-var (
- zkCfg = "6@:3801:3802:3803"
- myID uint
-)
-
-func registerZkctlFlags(fs *pflag.FlagSet) {
- fs.StringVar(&zkCfg, "zk.cfg", zkCfg,
- "zkid@server1:leaderPort1:electionPort1:clientPort1,...)")
- fs.UintVar(&myID, "zk.myid", myID,
- "which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname")
-
-}
-func init() {
- servenv.OnParse(registerZkctlFlags)
-}
-
func main() {
defer exit.Recover()
- defer logutil.Flush()
-
- fs := pflag.NewFlagSet("zkctl", pflag.ExitOnError)
- log.RegisterFlags(fs)
- logutil.RegisterFlags(fs)
- args := servenv.ParseFlagsWithArgs("zkctl")
- zkConfig := zkctl.MakeZkConfigFromString(zkCfg, uint32(myID))
- zkd := zkctl.NewZkd(zkConfig)
-
- action := args[0]
- var err error
- switch action {
- case "init":
- err = zkd.Init()
- case "shutdown":
- err = zkd.Shutdown()
- case "start":
- err = zkd.Start()
- case "teardown":
- err = zkd.Teardown()
- default:
- log.Errorf("invalid action: %v", action)
- log.Errorf(usage)
- exit.Return(1)
- }
- if err != nil {
- log.Errorf("failed %v: %v", action, err)
+ if err := command.Root.Execute(); err != nil {
+ log.Error(err)
exit.Return(1)
}
}
diff --git a/go/cmd/zkctld/cli/zkctld.go b/go/cmd/zkctld/cli/zkctld.go
new file mode 100644
index 00000000000..101f1013722
--- /dev/null
+++ b/go/cmd/zkctld/cli/zkctld.go
@@ -0,0 +1,100 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cli
+
+import (
+ "fmt"
+ "os"
+ "os/signal"
+ "syscall"
+
+ "github.com/spf13/cobra"
+ "github.com/spf13/pflag"
+
+ "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/vt/log"
+ "vitess.io/vitess/go/vt/logutil"
+ "vitess.io/vitess/go/vt/servenv"
+ "vitess.io/vitess/go/vt/zkctl"
+)
+
+var (
+ zkCfg = "6@:3801:3802:3803"
+ myID uint
+ zkExtra []string
+
+ Main = &cobra.Command{
+ Use: "zkctld",
+ Short: "zkctld is a daemon that starts or initializes ZooKeeper with Vitess-specific configuration. It will stay running as long as the underlying ZooKeeper server, and will pass along SIGTERM.",
+ Args: cobra.NoArgs,
+ PersistentPreRunE: servenv.CobraPreRunE,
+ PostRun: func(cmd *cobra.Command, args []string) {
+ logutil.Flush()
+ },
+ RunE: run,
+ }
+)
+
+func init() {
+ servenv.OnParse(registerFlags)
+}
+
+func registerFlags(fs *pflag.FlagSet) {
+ fs.StringVar(&zkCfg, "zk.cfg", zkCfg,
+ "zkid@server1:leaderPort1:electionPort1:clientPort1,...)")
+ fs.UintVar(&myID, "zk.myid", myID,
+ "which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname")
+ fs.StringArrayVar(&zkExtra, "zk.extra", zkExtra,
+ "extra config line(s) to append verbatim to config (flag can be specified more than once)")
+ acl.RegisterFlags(fs)
+}
+
+func run(cmd *cobra.Command, args []string) error {
+ servenv.Init()
+ zkConfig := zkctl.MakeZkConfigFromString(zkCfg, uint32(myID))
+ zkConfig.Extra = zkExtra
+ zkd := zkctl.NewZkd(zkConfig)
+
+ if zkd.Inited() {
+ log.Infof("already initialized, starting without init...")
+ if err := zkd.Start(); err != nil {
+ return fmt.Errorf("failed start: %v", err)
+ }
+ } else {
+ log.Infof("initializing...")
+ if err := zkd.Init(); err != nil {
+ return fmt.Errorf("failed init: %v", err)
+ }
+ }
+
+ log.Infof("waiting for signal or server shutdown...")
+ sig := make(chan os.Signal, 1)
+ signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
+ select {
+ case <-zkd.Done():
+ log.Infof("server shut down on its own")
+ case <-sig:
+ log.Infof("signal received, shutting down server")
+
+ // Action to perform if there is an error
+ if err := zkd.Shutdown(); err != nil {
+ return fmt.Errorf("error during shutdown:%v", err)
+ }
+ }
+
+ return nil
+}
diff --git a/go/cmd/zkctld/docgen/main.go b/go/cmd/zkctld/docgen/main.go
new file mode 100644
index 00000000000..9cf989f37b7
--- /dev/null
+++ b/go/cmd/zkctld/docgen/main.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package main
+
+import (
+ "github.com/spf13/cobra"
+
+ "vitess.io/vitess/go/cmd/internal/docgen"
+ "vitess.io/vitess/go/cmd/zkctld/cli"
+)
+
+func main() {
+ var dir string
+ cmd := cobra.Command{
+ Use: "docgen [-d <dir>]",
+ RunE: func(cmd *cobra.Command, args []string) error {
+ return docgen.GenerateMarkdownTree(cli.Main, dir)
+ },
+ }
+
+ cmd.Flags().StringVarP(&dir, "dir", "d", "doc", "output directory to write documentation")
+ _ = cmd.Execute()
+}
diff --git a/go/cmd/zkctld/zkctld.go b/go/cmd/zkctld/zkctld.go
index dac1866f60f..211b63325eb 100644
--- a/go/cmd/zkctld/zkctld.go
+++ b/go/cmd/zkctld/zkctld.go
@@ -20,74 +20,15 @@ limitations under the License.
package main
import (
- "os"
- "os/signal"
- "syscall"
-
- "github.com/spf13/pflag"
-
- "vitess.io/vitess/go/acl"
+ "vitess.io/vitess/go/cmd/zkctld/cli"
"vitess.io/vitess/go/exit"
"vitess.io/vitess/go/vt/log"
- "vitess.io/vitess/go/vt/logutil"
- "vitess.io/vitess/go/vt/servenv"
- "vitess.io/vitess/go/vt/zkctl"
-)
-
-var (
- zkCfg = "6@:3801:3802:3803"
- myID uint
)
-func init() {
- servenv.OnParse(registerFlags)
-}
-
-func registerFlags(fs *pflag.FlagSet) {
- fs.StringVar(&zkCfg, "zk.cfg", zkCfg,
- "zkid@server1:leaderPort1:electionPort1:clientPort1,...)")
- fs.UintVar(&myID, "zk.myid", myID,
- "which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname")
-
- acl.RegisterFlags(fs)
-}
-
func main() {
defer exit.Recover()
- defer logutil.Flush()
-
- servenv.ParseFlags("zkctld")
- servenv.Init()
- zkConfig := zkctl.MakeZkConfigFromString(zkCfg, uint32(myID))
- zkd := zkctl.NewZkd(zkConfig)
-
- if zkd.Inited() {
- log.Infof("already initialized, starting without init...")
- if err := zkd.Start(); err != nil {
- log.Errorf("failed start: %v", err)
- exit.Return(255)
- }
- } else {
- log.Infof("initializing...")
- if err := zkd.Init(); err != nil {
- log.Errorf("failed init: %v", err)
- exit.Return(255)
- }
- }
-
- log.Infof("waiting for signal or server shutdown...")
- sig := make(chan os.Signal, 1)
- signal.Notify(sig, syscall.SIGINT, syscall.SIGTERM)
- select {
- case <-zkd.Done():
- log.Infof("server shut down on its own")
- case <-sig:
- log.Infof("signal received, shutting down server")
-
- // Action to perform if there is an error
- if err := zkd.Shutdown(); err != nil {
- log.Errorf("error during shutdown:%v", err)
- exit.Return(1)
- }
+ if err := cli.Main.Execute(); err != nil {
+ log.Error(err)
+ exit.Return(1)
}
}
diff --git a/go/constants/sidecar/name.go b/go/constants/sidecar/name.go
new file mode 100644
index 00000000000..063452782b7
--- /dev/null
+++ b/go/constants/sidecar/name.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sidecar
+
+import (
+ "sync/atomic"
+)
+
+const (
+ DefaultName = "_vt"
+)
+
+var (
+ // This should be accessed via GetName()
+ sidecarDBName atomic.Value
+)
+
+func init() {
+ sidecarDBName.Store(DefaultName)
+}
+
+func SetName(name string) {
+ sidecarDBName.Store(name)
+}
+
+func GetName() string {
+ return sidecarDBName.Load().(string)
+}
diff --git a/go/constants/sidecar/queries.go b/go/constants/sidecar/queries.go
new file mode 100644
index 00000000000..97fa30ebecc
--- /dev/null
+++ b/go/constants/sidecar/queries.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sidecar
+
+import "vitess.io/vitess/go/vt/sqlparser"
+
+// region unit-test-only
+// This section uses helpers used in tests, but also in
+// go/vt/vtexplain/vtexplain_vttablet.go.
+// Hence, it is here and not in the _test.go file.
+const (
+ createDBQuery = "create database if not exists %s"
+ createTableRegexp = "(?i)CREATE TABLE .* `?\\_vt\\`?..*"
+ alterTableRegexp = "(?i)ALTER TABLE `?\\_vt\\`?..*"
+)
+
+var (
+ DBInitQueries = []string{
+ "use %s",
+ createDBQuery,
+ }
+ // Query patterns to handle in mocks.
+ DBInitQueryPatterns = []string{
+ createTableRegexp,
+ alterTableRegexp,
+ }
+)
+
+// GetCreateQuery returns the CREATE DATABASE SQL statement
+// used to create the sidecar database.
+func GetCreateQuery() string {
+ return sqlparser.BuildParsedQuery(createDBQuery, GetIdentifier()).Query
+}
+
+// GetIdentifier returns the sidecar database name as an SQL
+// identifier string, most importantly this means that it will
+// be properly escaped if/as needed.
+func GetIdentifier() string {
+ ident := sqlparser.NewIdentifierCS(GetName())
+ return sqlparser.String(ident)
+}
diff --git a/go/flags/endtoend/flags_test.go b/go/flags/endtoend/flags_test.go
index 61bc1dacfc3..25cca54caf9 100644
--- a/go/flags/endtoend/flags_test.go
+++ b/go/flags/endtoend/flags_test.go
@@ -41,18 +41,21 @@ var (
//go:embed mysqlctld.txt
mysqlctldTxt string
+ //go:embed topo2topo.txt
+ topo2topoTxt string
+
//go:embed vtaclcheck.txt
vtaclcheckTxt string
+ //go:embed vtcombo.txt
+ vtcomboTxt string
+
//go:embed vtexplain.txt
vtexplainTxt string
//go:embed vtgate.txt
vtgateTxt string
- //go:embed vtgr.txt
- vtgrTxt string
-
//go:embed vttablet.txt
vttabletTxt string
@@ -71,6 +74,9 @@ var (
//go:embed vtctldclient.txt
vtctldclientTxt string
+ //go:embed vtgateclienttest.txt
+ vtgateclienttestTxt string
+
//go:embed vttestserver.txt
vttestserverTxt string
@@ -87,23 +93,25 @@ var (
zkTxt string
helpOutput = map[string]string{
- "mysqlctl": mysqlctlTxt,
- "mysqlctld": mysqlctldTxt,
- "vtaclcheck": vtaclcheckTxt,
- "vtexplain": vtexplainTxt,
- "vtgate": vtgateTxt,
- "vtgr": vtgrTxt,
- "vttablet": vttabletTxt,
- "vttlstest": vttlstestTxt,
- "vtctld": vtctldTxt,
- "vtctlclient": vtctlclientTxt,
- "vtctldclient": vtctldclientTxt,
- "vtorc": vtorcTxt,
- "vttestserver": vttestserverTxt,
- "zkctld": zkctldTxt,
- "vtbackup": vtbackupTxt,
- "zk": zkTxt,
- "zkctl": zkctlTxt,
+ "mysqlctl": mysqlctlTxt,
+ "mysqlctld": mysqlctldTxt,
+ "topo2topo": topo2topoTxt,
+ "vtaclcheck": vtaclcheckTxt,
+ "vtbackup": vtbackupTxt,
+ "vtcombo": vtcomboTxt,
+ "vtctlclient": vtctlclientTxt,
+ "vtctld": vtctldTxt,
+ "vtctldclient": vtctldclientTxt,
+ "vtexplain": vtexplainTxt,
+ "vtgate": vtgateTxt,
+ "vtgateclienttest": vtgateclienttestTxt,
+ "vtorc": vtorcTxt,
+ "vttablet": vttabletTxt,
+ "vttestserver": vttestserverTxt,
+ "vttlstest": vttlstestTxt,
+ "zk": zkTxt,
+ "zkctl": zkctlTxt,
+ "zkctld": zkctldTxt,
}
)
diff --git a/go/flags/endtoend/mysqlctl.txt b/go/flags/endtoend/mysqlctl.txt
index 4af44804749..a8f832d3345 100644
--- a/go/flags/endtoend/mysqlctl.txt
+++ b/go/flags/endtoend/mysqlctl.txt
@@ -1,16 +1,24 @@
-Usage: mysqlctl [global-flags] -- [command-flags]
+`mysqlctl` is a command-line client used for managing `mysqld` instances.
-The commands are listed below. Use 'mysqlctl -- {-h, --help}' for command help.
+It is responsible for bootstrapping tasks such as generating a configuration file for `mysqld` and initializing the instance and its data directory.
+The `mysqld_safe` watchdog is utilized when present.
+This helps ensure that `mysqld` is automatically restarted after failures.
- init [--wait_time=5m] [--init_db_sql_file=]
- init_config
- reinit_config
- teardown [--wait_time=5m] [--force]
- start [--wait_time=5m]
- shutdown [--wait_time=5m]
- position
+Usage:
+ mysqlctl [command]
-Global flags:
+Available Commands:
+ completion Generate the autocompletion script for the specified shell
+ help Help about any command
+ init Initializes the directory structure and starts mysqld.
+ init_config Initializes the directory structure, creates my.cnf file, but does not start mysqld.
+ position Compute operations on replication positions
+ reinit_config Reinitializes my.cnf file with new server_id.
+ shutdown Shuts down mysqld, without removing any files.
+ start Starts mysqld on an already 'init'-ed directory.
+ teardown Shuts mysqld down and removes the directory.
+
+Flags:
--alsologtostderr log to standard error as well as files
--app_idle_timeout duration Idle timeout for app connections (default 1m0s)
--app_pool_size int Size of the connection pool for app connections (default 40)
@@ -52,7 +60,7 @@ Global flags:
--db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
--dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
--dba_pool_size int Size of the connection pool for dba connections (default 20)
- -h, --help display usage and exit
+ -h, --help help for mysqlctl
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
--lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
@@ -62,9 +70,9 @@ Global flags:
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
--logtostderr log to standard error instead of files
--max-stack-size int configure the maximum stack size in bytes (default 67108864)
- --mysql_port int MySQL port (default 3306)
+ --mysql_port int MySQL port. (default 3306)
--mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess")
- --mysql_socket string Path to the mysqld socket file
+ --mysql_socket string Path to the mysqld socket file.
--mysqlctl_client_protocol string the protocol to use to talk to the mysqlctl server (default "grpc")
--mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
--mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
@@ -81,7 +89,9 @@ Global flags:
--stderrthreshold severity logs at or above this threshold go to stderr (default 1)
--table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class
--tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
- --tablet_uid uint32 Tablet UID (default 41983)
+ --tablet_uid uint32 Tablet UID. (default 41983)
--v Level log level for V logs
-v, --version print binary version
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+
+Use "mysqlctl [command] --help" for more information about a command.
diff --git a/go/flags/endtoend/mysqlctld.txt b/go/flags/endtoend/mysqlctld.txt
index 6fbbd059492..06b48347bf6 100644
--- a/go/flags/endtoend/mysqlctld.txt
+++ b/go/flags/endtoend/mysqlctld.txt
@@ -1,7 +1,28 @@
-Usage of mysqlctld:
+`mysqlctld` is a gRPC server that can be used instead of the `mysqlctl` client tool.
+If the target directories are empty when it is invoked, it automatically performs initialization operations to bootstrap the `mysqld` instance before starting it.
+The `mysqlctld` process can subsequently receive gRPC commands from a `vttablet` to perform housekeeping operations like shutting down and restarting the `mysqld` instance as needed.
+
+{{ "{{< warning >}}" }}
+`mysqld_safe` is not used so the `mysqld` process will not be automatically restarted in case of a failure.
+{{ "{{</ warning >}}" }}
+
+To enable communication with a `vttablet`, the server must be configured to receive gRPC messages on a unix domain socket.
+
+Usage:
+ mysqlctld [flags]
+
+Examples:
+mysqlctld \
+ --log_dir=${VTDATAROOT}/logs \
+ --tablet_uid=100 \
+ --mysql_port=17100 \
+ --socket_file=/path/to/socket_file
+
+Flags:
--alsologtostderr log to standard error as well as files
--app_idle_timeout duration Idle timeout for app connections (default 1m0s)
--app_pool_size int Size of the connection pool for app connections (default 40)
+ --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system.
--catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
--config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored.
--config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn)
@@ -44,6 +65,7 @@ Usage of mysqlctld:
--grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
--grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
--grpc_auth_static_password_file string JSON File to read the users/passwords from.
+ --grpc_bind_address string Bind address for gRPC calls. If empty, listen on all addresses.
--grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
--grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
--grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
@@ -62,7 +84,7 @@ Usage of mysqlctld:
--grpc_server_initial_window_size int gRPC server initial window size
--grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
--grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
- -h, --help display usage and exit
+ -h, --help help for mysqlctld
--init_db_sql_file string Path to .sql file to run after mysqld initialization
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
diff --git a/go/flags/endtoend/topo2topo.txt b/go/flags/endtoend/topo2topo.txt
new file mode 100644
index 00000000000..4391a32a1a8
--- /dev/null
+++ b/go/flags/endtoend/topo2topo.txt
@@ -0,0 +1,44 @@
+topo2topo copies Vitess topology data from one topo server to another.
+It can also be used to compare data between two topologies.
+
+Usage:
+ topo2topo [flags]
+
+Flags:
+ --alsologtostderr log to standard error as well as files
+ --compare compares data between topologies
+ --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored.
+ --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn)
+ --config-name string Name of the config file (without extension) to search for. (default "vtconfig")
+ --config-path strings Paths to search for config files in. (default [{{ .Workdir }}])
+ --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s)
+ --config-type string Config file type (omit to infer config type from file extension).
+ --do-keyspaces copies the keyspace information
+ --do-routing-rules copies the routing rules
+ --do-shard-replications copies the shard replication information
+ --do-shards copies the shard information
+ --do-tablets copies the tablet information
+ --from_implementation string topology implementation to copy data from
+ --from_root string topology server root to copy data from
+ --from_server string topology server address to copy data from
+ --grpc_enable_tracing Enable gRPC tracing.
+ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+ --grpc_prometheus Enable gRPC monitoring with Prometheus.
+ -h, --help help for topo2topo
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logtostderr log to standard error instead of files
+ --pprof strings enable profiling
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --to_implementation string topology implementation to copy data to
+ --to_root string topology server root to copy data to
+ --to_server string topology server address to copy data to
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
diff --git a/go/flags/endtoend/vtaclcheck.txt b/go/flags/endtoend/vtaclcheck.txt
index 001d3a5b192..34bef9a05f9 100644
--- a/go/flags/endtoend/vtaclcheck.txt
+++ b/go/flags/endtoend/vtaclcheck.txt
@@ -1,4 +1,9 @@
-Usage of vtaclcheck:
+vtaclcheck checks that the access-control list (ACL) rules in a given file are valid.
+
+Usage:
+ vtaclcheck [flags]
+
+Flags:
--acl-file string The path of the JSON ACL file to check
--alsologtostderr log to standard error as well as files
--config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored.
@@ -7,7 +12,7 @@ Usage of vtaclcheck:
--config-path strings Paths to search for config files in. (default [{{ .Workdir }}])
--config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s)
--config-type string Config file type (omit to infer config type from file extension).
- -h, --help display usage and exit
+ -h, --help help for vtaclcheck
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
diff --git a/go/flags/endtoend/vtbackup.txt b/go/flags/endtoend/vtbackup.txt
index 3678872ef65..2429f631d68 100644
--- a/go/flags/endtoend/vtbackup.txt
+++ b/go/flags/endtoend/vtbackup.txt
@@ -1,4 +1,47 @@
-Usage of vtbackup:
+vtbackup is a batch command to perform a single pass of backup maintenance for a shard.
+
+When run periodically for each shard, vtbackup can ensure these configurable policies:
+ * There is always a recent backup for the shard.
+ * Old backups for the shard are removed.
+
+Whatever system launches vtbackup is responsible for the following:
+ - Running vtbackup with similar flags that would be used for a vttablet and
+ mysqlctld in the target shard to be backed up.
+ - Provisioning as much disk space for vtbackup as would be given to vttablet.
+ The data directory MUST be empty at startup. Do NOT reuse a persistent disk.
+ - Running vtbackup periodically for each shard, for each backup storage location.
+ - Ensuring that at most one instance runs at a time for a given pair of shard
+ and backup storage location.
+ - Retrying vtbackup if it fails.
+ - Alerting human operators if the failure is persistent.
+
+The process vtbackup follows to take a new backup has the following steps:
+ 1. Restore from the most recent backup.
+ 2. Start a mysqld instance (but no vttablet) from the restored data.
+ 3. Instruct mysqld to connect to the current shard primary and replicate any
+ transactions that are new since the last backup.
+ 4. Ask the primary for its current replication position and set that as the goal
+ for catching up on replication before taking the backup, so the goalposts
+ don't move.
+ 5. Wait until replication is caught up to the goal position or beyond.
+ 6. Stop mysqld and take a new backup.
+
+Aside from additional replication load while vtbackup's mysqld catches up on
+new transactions, the shard should be otherwise unaffected. Existing tablets
+will continue to serve, and no new tablets will appear in topology, meaning no
+query traffic will ever be routed to vtbackup's mysqld. This silent operation
+mode helps make backups minimally disruptive to serving capacity and orthogonal
+to the handling of the query path.
+
+The command-line parameters to vtbackup specify a policy for when a new backup
+is needed, and when old backups should be removed. If the existing backups
+already satisfy the policy, then vtbackup will do nothing and return success
+immediately.
+
+Usage:
+ vtbackup [flags]
+
+Flags:
--allow_first_backup Allow this job to take the first backup of an existing shard.
--alsologtostderr log to standard error as well as files
--azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path).
@@ -12,6 +55,7 @@ Usage of vtbackup:
--backup_storage_compress if set, the backup files will be compressed. (default true)
--backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups.
--backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, in parallel, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
+ --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system.
--builtinbackup-file-read-buffer-size uint read files using an IO buffer of this many bytes. Golang defaults are used when set to 0.
--builtinbackup-file-write-buffer-size uint write files using an IO buffer of this many bytes. Golang defaults are used when set to 0. (default 2097152)
--builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
@@ -92,7 +136,7 @@ Usage of vtbackup:
--grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
--grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 268435456)
--grpc_prometheus Enable gRPC monitoring with Prometheus.
- -h, --help display usage and exit
+ -h, --help help for vtbackup
--incremental_from_pos string Position of previous backup. Default: empty. If given, then this backup becomes an incremental backup from given position. If value is 'auto', backup taken from last successful backup position
--init_db_name_override string (init parameter) override the name of the db used by vttablet
--init_db_sql_file string path to .sql file to run after mysql_install_db
@@ -134,6 +178,7 @@ Usage of vtbackup:
--mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess")
--mysql_socket string path to the mysql socket
--mysql_timeout duration how long to wait for mysqld startup (default 5m0s)
+ --opentsdb_uri string URI of opentsdb /api/put method
--port int port for the server
--pprof strings enable profiling
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
@@ -184,6 +229,7 @@ Usage of vtbackup:
--topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
--topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
--topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
+ --upgrade-safe Whether to use innodb_fast_shutdown=0 for the backup so it is safe to use for MySQL upgrades.
--v Level log level for V logs
-v, --version print binary version
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
diff --git a/go/flags/endtoend/vtbench.txt b/go/flags/endtoend/vtbench.txt
new file mode 100644
index 00000000000..d74dc13ebc8
--- /dev/null
+++ b/go/flags/endtoend/vtbench.txt
@@ -0,0 +1,97 @@
+vtbench is a simple load testing client to compare workloads in Vitess across the various client/server protocols.
+
+Usage:
+ vtbench [flags]
+
+Examples:
+There are a number of command line options to control the behavior,
+but as a basic example, the three supported client protocols are:
+
+Mysql protocol to vtgate:
+vtbench \
+ --protocol mysql \
+ --host vtgate-host.my.domain \
+ --port 15306 \
+ --user db_username \
+ --db-credentials-file ./vtbench_db_creds.json \
+ --db @replica \
+ --sql "select * from loadtest_table where id=123456789" \
+ --threads 10 \
+ --count 10
+
+GRPC to vtgate:
+vtbench \
+ --protocol grpc-vtgate \
+ --host vtgate-host.my.domain \
+ --port 15999 \
+ --db @replica \
+ $VTTABLET_GRPC_ARGS \
+ --sql "select * from loadtest_table where id=123456789" \
+ --threads 10 \
+ --count 10
+
+GRPC to vttablet:
+vtbench \
+ --protocol grpc-vttablet \
+ --host tablet-loadtest-00-80.my.domain \
+ --port 15999 \
+ --db loadtest/00-80@replica \
+ --sql "select * from loadtest_table where id=123456789" \
+ --threads 10 \
+ --count 10
+
+Flags:
+ --alsologtostderr log to standard error as well as files
+ --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored.
+ --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn)
+ --config-name string Name of the config file (without extension) to search for. (default "vtconfig")
+ --config-path strings Paths to search for config files in. (default [{{ .Workdir }}])
+ --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s)
+ --config-type string Config file type (omit to infer config type from file extension).
+ --count int Number of queries per thread (default 1000)
+ --db string Database name to use when connecting / running the queries (e.g. @replica, keyspace, keyspace/shard etc)
+ --deadline duration Maximum duration for the test run (default 5 minutes) (default 5m0s)
+ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
+ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+ --grpc_enable_tracing Enable gRPC tracing.
+ --grpc_initial_conn_window_size int gRPC initial connection window size
+ --grpc_initial_window_size int gRPC initial window size
+ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+ --grpc_prometheus Enable gRPC monitoring with Prometheus.
+ -h, --help help for vtbench
+ --host string VTGate host(s) in the form 'host1,host2,...'
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logtostderr log to standard error instead of files
+ --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess")
+ --port int VTGate port
+ --pprof strings enable profiling
+ --protocol string Client protocol, either mysql (default), grpc-vtgate, or grpc-vttablet (default "mysql")
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+ --sql string SQL statement to execute
+ --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+ --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512) (default 512)
+ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --tablet_grpc_ca string the server ca to use to validate servers when connecting
+ --tablet_grpc_cert string the cert to use to connect
+ --tablet_grpc_crl string the server crl to use to validate server certificates when connecting
+ --tablet_grpc_key string the key to use to connect
+ --tablet_grpc_server_name string the server name to use to validate server certificate
+ --threads int Number of parallel threads to run (default 2)
+ --unix_socket string VTGate unix socket
+ --user string Username to connect using mysql (password comes from the db-credentials-file)
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --vtgate_grpc_ca string the server ca to use to validate servers when connecting
+ --vtgate_grpc_cert string the cert to use to connect
+ --vtgate_grpc_crl string the server crl to use to validate server certificates when connecting
+ --vtgate_grpc_key string the key to use to connect
+ --vtgate_grpc_server_name string the server name to use to validate server certificate
diff --git a/go/flags/endtoend/vtclient.txt b/go/flags/endtoend/vtclient.txt
new file mode 100644
index 00000000000..3d17734168c
--- /dev/null
+++ b/go/flags/endtoend/vtclient.txt
@@ -0,0 +1,52 @@
+vtclient connects to a vtgate server using the standard go driver API.
+
+For query bound variables, we assume place-holders in the query string
+in the form of :v1, :v2, etc.
+
+Usage:
+ vtclient [flags]
+
+Examples:
+vtclient --server vtgate:15991 "SELECT * FROM messages"
+
+vtclient --server vtgate:15991 --target '@primary' --bind_variables '[ 12345, 1, "msg 12345" ]' "INSERT INTO messages (page,time_created_ns,message) VALUES (:v1, :v2, :v3)"
+
+Flags:
+ --alsologtostderr log to standard error as well as files
+ --bind_variables float bind variables as a json list (default null)
+ --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored.
+ --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn)
+ --config-name string Name of the config file (without extension) to search for. (default "vtconfig")
+ --config-path strings Paths to search for config files in. (default [{{ .Workdir }}])
+ --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s)
+ --config-type string Config file type (omit to infer config type from file extension).
+ --count int DMLs only: Number of times each thread executes the query. Useful for simple, sustained load testing. (default 1)
+ --grpc_enable_tracing Enable gRPC tracing.
+ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+ --grpc_prometheus Enable gRPC monitoring with Prometheus.
+ -h, --help help for vtclient
+ --json Output JSON instead of human-readable table
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logtostderr log to standard error instead of files
+ --max_sequence_id int max sequence ID.
+ --min_sequence_id int min sequence ID to generate. When max_sequence_id > min_sequence_id, for each query, a number is generated in [min_sequence_id, max_sequence_id) and attached to the end of the bind variables.
+ --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess")
+ --parallel int DMLs only: Number of threads executing the same query in parallel. Useful for simple load testing. (default 1)
+ --pprof strings enable profiling
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+ --qps int queries per second to throttle each thread at.
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+ --server string vtgate server to connect to
+ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --streaming use a streaming query
+ --target string keyspace:shard@tablet_type
+ --timeout duration timeout for queries (default 30s)
+ --use_random_sequence use random sequence for generating [min_sequence_id, max_sequence_id)
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
diff --git a/go/flags/endtoend/vtcombo.txt b/go/flags/endtoend/vtcombo.txt
new file mode 100644
index 00000000000..a2273cbd25d
--- /dev/null
+++ b/go/flags/endtoend/vtcombo.txt
@@ -0,0 +1,437 @@
+vtcombo is a single binary containing several vitess components.
+
+In particular, it contains:
+- A topology server based on an in-memory map.
+- One vtgate instance.
+- Many vttablet instances.
+- A vtctld instance so it's easy to see the topology.
+
+Usage:
+ vtcombo [flags]
+
+Flags:
+ --action_timeout duration time to wait for an action before resorting to force (default 1m0s)
+ --allow-kill-statement Allows the execution of kill statement
+ --allowed_tablet_types strings Specifies the tablet types this vtgate is allowed to route queries to. Should be provided as a comma-separated set of tablet types.
+ --alsologtostderr log to standard error as well as files
+ --app_idle_timeout duration Idle timeout for app connections (default 1m0s)
+ --app_pool_size int Size of the connection pool for app connections (default 40)
+ --backup_engine_implementation string Specifies which implementation to use for creating new backups (builtin or xtrabackup). Restores will always be done with whichever engine created a given backup. (default "builtin")
+ --backup_storage_block_size int if backup_storage_compress is true, backup_storage_block_size sets the byte size for each block while compressing (default is 250000). (default 250000)
+ --backup_storage_compress if set, the backup files will be compressed. (default true)
+ --backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, in parallel, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
+ --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system.
+ --binlog_host string PITR restore parameter: hostname/IP of binlog server.
+ --binlog_password string PITR restore parameter: password of binlog server.
+ --binlog_player_protocol string the protocol to download binlogs from a vttablet (default "grpc")
+ --binlog_port int PITR restore parameter: port of binlog server.
+ --binlog_ssl_ca string PITR restore parameter: Filename containing TLS CA certificate to verify binlog server TLS certificate against.
+ --binlog_ssl_cert string PITR restore parameter: Filename containing mTLS client certificate to present to binlog server as authentication.
+ --binlog_ssl_key string PITR restore parameter: Filename containing mTLS client private key for use in binlog server authentication.
+ --binlog_ssl_server_name string PITR restore parameter: TLS server name (common name) to verify against for the binlog server we are connecting to (If not set: use the hostname or IP supplied in --binlog_host).
+ --binlog_user string PITR restore parameter: username of binlog server.
+ --buffer_drain_concurrency int Maximum number of requests retried simultaneously. More concurrency will increase the load on the PRIMARY vttablet when draining the buffer. (default 1)
+ --buffer_keyspace_shards string If not empty, limit buffering to these entries (comma separated). Entry format: keyspace or keyspace/shard. Requires --enable_buffer=true.
+ --buffer_max_failover_duration duration Stop buffering completely if a failover takes longer than this duration. (default 20s)
+ --buffer_min_time_between_failovers duration Minimum time between the end of a failover and the start of the next one (tracked per shard). Faster consecutive failovers will not trigger buffering. (default 1m0s)
+ --buffer_size int Maximum number of buffered requests in flight (across all ongoing failovers). (default 1000)
+ --buffer_window duration Duration for how long a request should be buffered at most. (default 10s)
+ --builtinbackup-file-read-buffer-size uint read files using an IO buffer of this many bytes. Golang defaults are used when set to 0.
+ --builtinbackup-file-write-buffer-size uint write files using an IO buffer of this many bytes. Golang defaults are used when set to 0. (default 2097152)
+ --builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
+ --builtinbackup_progress duration how often to send progress updates when backing up large files. (default 5s)
+ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+ --cell string cell to use
+ --compression-engine-name string compressor engine used for compression. (default "pargzip")
+ --compression-level int what level to pass to the compressor. (default 1)
+ --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored.
+ --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn)
+ --config-name string Name of the config file (without extension) to search for. (default "vtconfig")
+ --config-path strings Paths to search for config files in. (default [{{ .Workdir }}])
+ --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s)
+ --config-type string Config file type (omit to infer config type from file extension).
+ --consolidator-stream-query-size int Configure the stream consolidator query size in bytes. Setting to 0 disables the stream consolidator. (default 2097152)
+ --consolidator-stream-total-size int Configure the stream consolidator total size in bytes. Setting to 0 disables the stream consolidator. (default 134217728)
+ --consul_auth_static_file string JSON File to read the topos/tokens from.
+ --datadog-agent-host string host to send spans to. if empty, no tracing will be done
+ --datadog-agent-port string port to send spans to. if empty, no tracing will be done
+ --db-credentials-file string db credentials file; send SIGHUP to reload this file
+ --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
+ --db-credentials-vault-addr string URL to Vault server
+ --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
+ --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
+ --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
+ --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
+ --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
+ --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
+ --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
+ --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
+ --db_allprivs_password string db allprivs password
+ --db_allprivs_use_ssl Set this flag to false to make the allprivs connection to not use ssl (default true)
+ --db_allprivs_user string db allprivs user userKey (default "vt_allprivs")
+ --db_app_password string db app password
+ --db_app_use_ssl Set this flag to false to make the app connection to not use ssl (default true)
+ --db_app_user string db app user userKey (default "vt_app")
+ --db_appdebug_password string db appdebug password
+ --db_appdebug_use_ssl Set this flag to false to make the appdebug connection to not use ssl (default true)
+ --db_appdebug_user string db appdebug user userKey (default "vt_appdebug")
+ --db_charset string Character set used for this tablet. (default "utf8mb4")
+ --db_conn_query_info enable parsing and processing of QUERY_OK info fields
+ --db_connect_timeout_ms int connection timeout to mysqld in milliseconds (0 for no timeout)
+ --db_dba_password string db dba password
+ --db_dba_use_ssl Set this flag to false to make the dba connection to not use ssl (default true)
+ --db_dba_user string db dba user userKey (default "vt_dba")
+ --db_erepl_password string db erepl password
+ --db_erepl_use_ssl Set this flag to false to make the erepl connection to not use ssl (default true)
+ --db_erepl_user string db erepl user userKey (default "vt_erepl")
+ --db_filtered_password string db filtered password
+ --db_filtered_use_ssl Set this flag to false to make the filtered connection to not use ssl (default true)
+ --db_filtered_user string db filtered user userKey (default "vt_filtered")
+ --db_flags uint Flag values as defined by MySQL.
+ --db_flavor string Flavor override. Valid value is FilePos.
+ --db_host string The host name for the tcp connection.
+ --db_port int tcp port
+ --db_repl_password string db repl password
+ --db_repl_use_ssl Set this flag to false to make the repl connection to not use ssl (default true)
+ --db_repl_user string db repl user userKey (default "vt_repl")
+ --db_server_name string server name of the DB we are connecting to.
+ --db_socket string The unix socket to connect on. If this is specified, host and port will not be used.
+ --db_ssl_ca string connection ssl ca
+ --db_ssl_ca_path string connection ssl ca path
+ --db_ssl_cert string connection ssl certificate
+ --db_ssl_key string connection ssl key
+ --db_ssl_mode SslMode SSL mode to connect with. One of disabled, preferred, required, verify_ca & verify_identity.
+ --db_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
+ --dba_idle_timeout duration Idle timeout for dba connections (default 1m0s)
+ --dba_pool_size int Size of the connection pool for dba connections (default 20)
+ --dbddl_plugin string controls how to handle CREATE/DROP DATABASE. use it if you are using your own database provisioning service (default "fail")
+ --ddl_strategy string Set default strategy for DDL statements. Override with @@ddl_strategy session variable (default "direct")
+ --default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. (default PRIMARY)
+ --degraded_threshold duration replication lag after which a replica is considered degraded (default 30s)
+ --disable_active_reparents if set, do not allow active reparents. Use this to protect a cluster using external reparents.
+ --emit_stats If set, emit stats to push-based monitoring and stats backends
+ --enable-consolidator Synonym to -enable_consolidator (default true)
+ --enable-consolidator-replicas Synonym to -enable_consolidator_replicas
+ --enable-partial-keyspace-migration (Experimental) Follow shard routing rules: enable only while migrating a keyspace shard by shard. See documentation on Partial MoveTables for more. (default false)
+ --enable-per-workload-table-metrics If true, query counts and query error metrics include a label that identifies the workload
+ --enable-tx-throttler Synonym to -enable_tx_throttler
+ --enable-views Enable views support in vtgate.
+ --enable_buffer Enable buffering (stalling) of primary traffic during failovers.
+ --enable_buffer_dry_run Detect and log failover events, but do not actually buffer requests.
+ --enable_consolidator This option enables the query consolidator. (default true)
+ --enable_consolidator_replicas This option enables the query consolidator only on replicas.
+ --enable_direct_ddl Allow users to submit direct DDL statements (default true)
+ --enable_hot_row_protection If true, incoming transactions for the same row (range) will be queued and cannot consume all txpool slots.
+ --enable_hot_row_protection_dry_run If true, hot row protection is not enforced but logs if transactions would have been queued.
+ --enable_online_ddl Allow users to submit, review and control Online DDL (default true)
+ --enable_replication_reporter Use polling to track replication lag.
+ --enable_set_var This will enable the use of MySQL's SET_VAR query hint for certain system variables instead of using reserved connections (default true)
+ --enable_system_settings This will enable the system settings to be changed per session at the database connection level (default true)
+ --enable_transaction_limit If true, limit on number of transactions open at the same time will be enforced for all users. User trying to open a new transaction after exhausting their limit will receive an error immediately, regardless of whether there are available slots or not.
+ --enable_transaction_limit_dry_run If true, limit on number of transactions open at the same time will be tracked for all users, but not enforced.
+ --enable_tx_throttler If true replication-lag-based throttling on transactions will be enabled.
+ --enforce_strict_trans_tables If true, vttablet requires MySQL to run with STRICT_TRANS_TABLES or STRICT_ALL_TABLES on. It is recommended to not turn this flag off. Otherwise MySQL may alter your supplied values before saving them to the database. (default true)
+ --external-compressor string command with arguments to use when compressing a backup.
+ --external-compressor-extension string extension to use when using an external compressor.
+ --external-decompressor string command with arguments to use when decompressing a backup.
+ --external_topo_server Should vtcombo use an external topology server instead of starting its own in-memory topology server. If true, vtcombo will use the flags defined in topo/server.go to open topo server
+ --foreign_key_mode string This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow (default "allow")
+ --gate_query_cache_memory int gate server query cache size in bytes, maximum amount of memory to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
+ --gc_check_interval duration Interval between garbage collection checks (default 1h0m0s)
+ --gc_purge_check_interval duration Interval between purge discovery checks (default 1m0s)
+ --gh-ost-path string override default gh-ost binary full path
+ --grpc-send-session-in-streaming If set, will send the session as last packet in streaming api to support transactions in streaming
+ --grpc-use-effective-groups If set, and SSL is not used, will set the immediate caller's security groups from the effective caller id's groups.
+ --grpc-use-static-authentication-callerid If set, will set the immediate caller id to the username authenticated by the static auth plugin.
+ --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+ --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+ --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+ --grpc_bind_address string Bind address for gRPC calls. If empty, listen on all addresses.
+ --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+ --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+ --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+ --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+ --grpc_enable_tracing Enable gRPC tracing.
+ --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+ --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+ --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+ --grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
+ --grpc_prometheus Enable gRPC monitoring with Prometheus.
+ --grpc_server_ca string path to server CA in PEM format, which will be combined with the server cert to return the full certificate chain to clients
+ --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+ --grpc_server_initial_window_size int gRPC server initial window size
+ --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+ --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+ --grpc_use_effective_callerid If set, and SSL is not used, will set the immediate caller id from the effective caller id's principal.
+ --health_check_interval duration Interval between health checks (default 20s)
+ --healthcheck_retry_delay duration health check retry delay (default 2ms)
+ --healthcheck_timeout duration the health check timeout period (default 1m0s)
+ --heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the sidecar database's heartbeat table. The result is used to inform the serving state of the vttablet via healthchecks.
+ --heartbeat_interval duration How frequently to read and write replication heartbeat. (default 1s)
+ --heartbeat_on_demand_duration duration If non-zero, heartbeats are only written upon consumer request, and only run for up to given duration following the request. Frequent requests can keep the heartbeat running consistently; when requests are infrequent heartbeat may completely stop between requests
+ -h, --help help for vtcombo
+ --hot_row_protection_concurrent_transactions int Number of concurrent transactions let through to the txpool/MySQL for the same hot row. Should be > 1 to have enough 'ready' transactions in MySQL and benefit from a pipelining effect. (default 5)
+ --hot_row_protection_max_global_queue_size int Global queue limit across all row (ranges). Useful to prevent that the queue can grow unbounded. (default 1000)
+ --hot_row_protection_max_queue_size int Maximum number of BeginExecute RPCs which will be queued for the same row (range). (default 20)
+ --init_db_name_override string (init parameter) override the name of the db used by vttablet. Without this flag, the db name defaults to vt_
+ --init_keyspace string (init parameter) keyspace to use for this tablet
+ --init_shard string (init parameter) shard to use for this tablet
+ --init_tablet_type string (init parameter) the tablet type to use for this tablet.
+ --init_tags StringMap (init parameter) comma separated list of key:value pairs used to tag the tablet
+ --init_timeout duration (init parameter) timeout to use for the init phase. (default 1m0s)
+ --jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
+ --json_topo vttest.TopoData vttest proto definition of the topology, encoded in json format. See vttest.proto for more information.
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --keyspaces_to_watch strings Specifies which keyspaces this vtgate should have access to while routing queries or accessing the vschema.
+ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+ --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s)
+ --lock_heartbeat_time duration If there is lock function used. This will keep the lock connection active by using this heartbeat (default 5s)
+ --lock_tables_timeout duration How long to keep the table locked before timing out (default 1m0s)
+ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_queries_to_file string Enable query logging to the specified file
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logtostderr log to standard error instead of files
+ --manifest-external-decompressor string command with arguments to store in the backup manifest when compressing a backup with an external compression engine.
+ --max-stack-size int configure the maximum stack size in bytes (default 67108864)
+ --max_concurrent_online_ddl int Maximum number of online DDL changes that may run concurrently (default 256)
+ --max_memory_rows int Maximum number of rows that will be held in memory for intermediate results as well as the final result. (default 300000)
+ --max_payload_size int The threshold for query payloads in bytes. A payload greater than this threshold will result in a failure to handle the query.
+ --message_stream_grace_period duration the amount of time to give for a vttablet to resume if it ends a message stream, usually because of a reparent. (default 30s)
+ --migration_check_interval duration Interval between migration checks (default 1m0s)
+ --mycnf-file string path to my.cnf, if reading all config params from there
+ --mycnf_bin_log_path string mysql binlog path
+ --mycnf_data_dir string data directory for mysql
+ --mycnf_error_log_path string mysql error log path
+ --mycnf_general_log_path string mysql general log path
+ --mycnf_innodb_data_home_dir string Innodb data home directory
+ --mycnf_innodb_log_group_home_dir string Innodb log group home directory
+ --mycnf_master_info_file string mysql master.info file
+ --mycnf_mysql_port int port mysql is listening on
+ --mycnf_pid_file string mysql pid file
+ --mycnf_relay_log_index_path string mysql relay log index path
+ --mycnf_relay_log_info_path string mysql relay log info path
+ --mycnf_relay_log_path string mysql relay log path
+ --mycnf_secure_file_priv string mysql path for loading secure files
+ --mycnf_server_id int mysql server id of the server (if specified, mycnf-file will be ignored)
+ --mycnf_slow_log_path string mysql slow query log path
+ --mycnf_socket_file string mysql socket file
+ --mycnf_tmp_dir string mysql tmp directory
+ --mysql-server-keepalive-period duration TCP period between keep-alives
+ --mysql-server-pool-conn-read-buffers If set, the server will pool incoming connection read buffers
+ --mysql_allow_clear_text_without_tls If set, the server will allow the use of a clear text password over non-SSL connections.
+ --mysql_auth_server_impl string Which auth server implementation to use. Options: none, ldap, clientcert, static, vault. (default "static")
+ --mysql_default_workload string Default session workload (OLTP, OLAP, DBA) (default "OLTP")
+ --mysql_port int mysql port (default 3306)
+ --mysql_server_bind_address string Binds on this address when listening to MySQL binary protocol. Useful to restrict listening to 'localhost' only for instance.
+ --mysql_server_port int If set, also listen for MySQL binary protocol connections on this port. (default -1)
+ --mysql_server_query_timeout duration mysql query timeout
+ --mysql_server_read_timeout duration connection read timeout
+ --mysql_server_require_secure_transport Reject insecure connections but only if mysql_server_ssl_cert and mysql_server_ssl_key are provided
+ --mysql_server_socket_path string This option specifies the Unix socket file to use when listening for local connections. By default it will be empty and it won't listen to a unix socket
+ --mysql_server_ssl_ca string Path to ssl CA for mysql server plugin SSL. If specified, server will require and validate client certs.
+ --mysql_server_ssl_cert string Path to the ssl cert for mysql server plugin SSL
+ --mysql_server_ssl_crl string Path to ssl CRL for mysql server plugin SSL
+ --mysql_server_ssl_key string Path to ssl key for mysql server plugin SSL
+ --mysql_server_ssl_server_ca string path to server CA in PEM format, which will be combined with the server cert to return the full certificate chain to clients
+ --mysql_server_tls_min_version string Configures the minimal TLS version negotiated when SSL is enabled. Defaults to TLSv1.2. Options: TLSv1.0, TLSv1.1, TLSv1.2, TLSv1.3.
+ --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess")
+ --mysql_server_write_timeout duration connection write timeout
+ --mysql_slow_connect_warn_threshold duration Warn if it takes more than the given threshold for a mysql connection to establish
+ --mysql_tcp_version string Select tcp, tcp4, or tcp6 to control the socket type. (default "tcp")
+ --mysqlctl_mycnf_template string template file to use for generating the my.cnf file during server init
+ --mysqlctl_socket string socket file to use for remote mysqlctl actions (empty for local actions)
+ --no_scatter when set to true, the planner will fail instead of producing a plan that includes scatter queries
+ --normalize_queries Rewrite queries with bind vars. Turn this off if the app itself sends normalized queries with bind vars. (default true)
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s)
+ --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+ --pitr_gtid_lookup_timeout duration PITR restore parameter: timeout for fetching gtid from timestamp. (default 1m0s)
+ --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right
+ --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
+ --port int port for the server
+ --pprof strings enable profiling
+ --proto_topo vttest.TopoData vttest proto definition of the topology, encoded in compact text format. See vttest.proto for more information.
+ --proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket
+ --proxy_tablets Setting this true will make vtctld proxy the tablet status instead of redirecting to them
+ --pt-osc-path string override default pt-online-schema-change binary full path
+ --publish_retry_interval duration how long vttablet waits to retry publishing the tablet record (default 30s)
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+ --query-log-stream-handler string URL handler for streaming queries log (default "/debug/querylog")
+ --query-timeout int Sets the default query timeout (in ms). Can be overridden by session variable (query_timeout) or comment directive (QUERY_TIMEOUT_MS)
+ --querylog-buffer-size int Maximum number of buffered query logs before throttling log output (default 10)
+ --querylog-filter-tag string string that must be present in the query for it to be logged; if using a value as the tag, you need to disable query normalization
+ --querylog-format string format for query logs ("text" or "json") (default "text")
+ --querylog-row-threshold uint Number of rows a query has to return or affect before being logged; not useful for streaming queries. 0 means all queries will be logged.
+ --queryserver-config-acl-exempt-acl string an acl that exempt from table acl checking (this acl is free to access any vitess tables).
+ --queryserver-config-annotate-queries prefix queries to MySQL backend with comment indicating vtgate principal (user) and target tablet type
+ --queryserver-config-enable-table-acl-dry-run If this flag is enabled, tabletserver will emit monitoring metrics and let the request pass regardless of table acl check results
+ --queryserver-config-idle-timeout duration query server idle timeout (in seconds), vttablet manages various mysql connection pools. This config means if a connection has not been used in given idle timeout, this connection will be removed from pool. This effectively manages number of connection objects and optimize the pool performance. (default 30m0s)
+ --queryserver-config-max-result-size int query server max result size, maximum number of rows allowed to return from vttablet for non-streaming queries. (default 10000)
+ --queryserver-config-message-postpone-cap int query server message postpone cap is the maximum number of messages that can be postponed at any given time. Set this number to substantially lower than transaction cap, so that the transaction pool isn't exhausted by the message subsystem. (default 4)
+ --queryserver-config-olap-transaction-timeout duration query server transaction timeout (in seconds), after which a transaction in an OLAP session will be killed (default 30s)
+ --queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting
+ --queryserver-config-pool-conn-max-lifetime duration query server connection max lifetime (in seconds), vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool. (default 0s)
+ --queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16)
+ --queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
+ --queryserver-config-query-pool-timeout duration query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead. (default 0s)
+ --queryserver-config-query-pool-waiter-cap int query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection (default 5000)
+ --queryserver-config-query-timeout duration query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30s)
+ --queryserver-config-schema-change-signal query server schema signal, will signal connected vtgates that schema has changed whenever this is detected. VTGates will need to have -schema_change_signal enabled for this to work (default true)
+ --queryserver-config-schema-reload-time duration query server schema reload time, how often vttablet reloads schemas from underlying MySQL instance in seconds. vttablet keeps table schemas in its own memory and periodically refreshes it from MySQL. This config controls the reload time. (default 30m0s)
+ --queryserver-config-stream-buffer-size int query server stream buffer size, the maximum number of bytes sent from vttablet for each stream call. It's recommended to keep this value in sync with vtgate's stream_buffer_size. (default 32768)
+ --queryserver-config-stream-pool-size int query server stream connection pool size, stream pool is used by stream queries: queries that return results to client in a streaming fashion (default 200)
+ --queryserver-config-stream-pool-timeout duration query server stream pool timeout (in seconds), it is how long vttablet waits for a connection from the stream pool. If set to 0 (default) then there is no timeout. (default 0s)
+ --queryserver-config-stream-pool-waiter-cap int query server stream pool waiter limit, this is the maximum number of streaming queries that can be queued waiting to get a connection
+ --queryserver-config-strict-table-acl only allow queries that pass table acl checks
+ --queryserver-config-terse-errors prevent bind vars from escaping in client error messages
+ --queryserver-config-transaction-cap int query server transaction cap is the maximum number of transactions allowed to happen at any given point of a time for a single vttablet. E.g. by setting transaction cap to 100, at most 100 transactions will be processed by a vttablet and the 101st transaction will be blocked (and fail if it cannot get connection within specified timeout) (default 20)
+ --queryserver-config-transaction-timeout duration query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value (default 30s)
+ --queryserver-config-truncate-error-len int truncate errors sent to client if they are longer than this value (0 means do not truncate)
+ --queryserver-config-txpool-timeout duration query server transaction pool timeout, it is how long vttablet waits if tx pool is full (default 1s)
+ --queryserver-config-txpool-waiter-cap int query server transaction pool waiter limit, this is the maximum number of transactions that can be queued waiting to get a connection (default 5000)
+ --queryserver-config-warn-result-size int query server result size warning threshold, warn if number of rows returned from vttablet for non-streaming queries exceeds this
+ --queryserver-enable-settings-pool Enable pooling of connections with modified system settings (default true)
+ --queryserver-enable-views Enable views support in vttablet.
+ --queryserver_enable_online_ddl Enable online DDL. (default true)
+ --redact-debug-ui-queries redact full queries and bind variables from debug UI
+ --relay_log_max_items int Maximum number of rows for VReplication target buffering. (default 5000)
+ --relay_log_max_size int Maximum buffer size (in bytes) for VReplication target buffering. If single rows are larger than this, a single row is buffered at a time. (default 250000)
+ --remote_operation_timeout duration time to wait for a remote operation (default 15s)
+ --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+ --restore-to-pos string (init incremental restore parameter) if set, run a point in time recovery that ends with the given position. This will attempt to use one full backup followed by zero or more incremental backups
+ --restore-to-timestamp string (init incremental restore parameter) if set, run a point in time recovery that restores up to the given timestamp, if possible. Given timestamp in RFC3339 format. Example: '2006-01-02T15:04:05Z07:00'
+ --restore_concurrency int (init restore parameter) how many concurrent files to restore at once (default 4)
+ --restore_from_backup (init restore parameter) will check BackupStorage for a recent backup at startup and start there
+ --restore_from_backup_ts string (init restore parameter) if set, restore the latest backup taken at or before this timestamp. Example: '2021-04-29.133050'
+ --retain_online_ddl_tables duration How long should vttablet keep an old migrated table before purging it (default 24h0m0s)
+ --sanitize_log_messages Remove potentially sensitive information in tablet INFO, WARNING, and ERROR log messages such as query parameters.
+ --schema-change-reload-timeout duration query server schema change reload timeout, this is how long to wait for the signaled schema reload operation to complete before giving up (default 30s)
+ --schema-version-max-age-seconds int max age of schema version records to be kept in memory by the vreplication historian
+ --schema_change_signal Enable the schema tracker; requires queryserver-config-schema-change-signal to be enabled on the underlying vttablets for this to work (default true)
+ --schema_dir string Schema base directory. Should contain one directory per keyspace, with a vschema.json file if necessary.
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+ --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
+ --serving_state_grace_period duration how long to pause after broadcasting health to vtgate, before enforcing a new serving state
+ --shard_sync_retry_delay duration delay between retries of updates to keep the tablet and its shard record in sync (default 30s)
+ --shutdown_grace_period duration how long to wait (in seconds) for queries and transactions to complete during graceful shutdown. (default 0s)
+ --sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
+ --sql-max-length-ui int truncate queries in debug UIs to the given length (default 512)
+ --srv_topo_cache_refresh duration how frequently to refresh the topology for cached entries (default 1s)
+ --srv_topo_cache_ttl duration how long to use cached entries for topology (default 1s)
+ --srv_topo_timeout duration topo server timeout (default 5s)
+ --start_mysql Should vtcombo also start mysql
+ --stats_backend string The name of the registered push-based monitoring/stats backend to use
+ --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
+ --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
+ --stats_drop_variables string Variables to be dropped from the list of exported variables.
+ --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
+ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --stream_buffer_size int the number of bytes sent from vtgate for each stream call. It's recommended to keep this value in sync with vttablet's query-server-config-stream-buffer-size. (default 32768)
+ --stream_health_buffer_size uint max streaming health entries to buffer per streaming health client (default 20)
+ --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class
+ --table_gc_lifecycle string States for a DROP TABLE garbage collection cycle. Default is 'hold,purge,evac,drop', use any subset ('drop' implicitly always included) (default "hold,purge,evac,drop")
+ --tablet_dir string The directory within the vtdataroot to store vttablet/mysql files. Defaults to being generated by the tablet uid.
+ --tablet_filters strings Specifies a comma-separated list of 'keyspace|shard_name or keyrange' values to filter the tablets to watch.
+ --tablet_health_keep_alive duration close streaming tablet health connection if there are no requests for this long (default 5m0s)
+ --tablet_hostname string if not empty, this hostname will be assumed instead of trying to resolve it
+ --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
+ --tablet_manager_grpc_cert string the cert to use to connect
+ --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
+ --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
+ --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
+ --tablet_manager_grpc_key string the key to use to connect
+ --tablet_manager_grpc_server_name string the server name to use to validate server certificate
+ --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
+ --tablet_refresh_interval duration Tablet refresh interval. (default 1m0s)
+ --tablet_refresh_known_tablets Whether to reload the tablet's address/port map from topo in case they change. (default true)
+ --tablet_url_template string Format string describing debug tablet url formatting. See getTabletDebugURL() for how to customize this. (default "http://{{ "{{.GetTabletHostPort}}" }}")
+ --throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' always implicitly included (default "replica")
+ --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
+ --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
+ --topo_consul_lock_session_ttl string TTL for consul session.
+ --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
+ --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
+ --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
+ --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
+ --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
+ --topo_global_root string the path of the global topology data in the global topology server
+ --topo_global_server_address string the address of the global topology server
+ --topo_implementation string the topology implementation to use
+ --topo_read_concurrency int Concurrency of topo reads. (default 32)
+ --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be &lt;scheme&gt;:&lt;auth&gt;, e.g., digest:user:pass
+ --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
+ --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
+ --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
+ --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
+ --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
+ --tracer string tracing service to use (default "noop")
+ --tracing-enable-logging whether to enable logging in the tracing service
+ --tracing-sampling-rate float sampling rate for the probabilistic jaeger sampler (default 0.1)
+ --tracing-sampling-type string sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote' (default "const")
+ --track_schema_versions When enabled, vttablet will store versions of schemas at each position that a DDL is applied and allow retrieval of the schema corresponding to a position
+ --transaction-log-stream-handler string URL handler for streaming transactions log (default "/debug/txlog")
+ --transaction_limit_by_component Include CallerID.component when considering who the user is for the purpose of transaction limit.
+ --transaction_limit_by_principal Include CallerID.principal when considering who the user is for the purpose of transaction limit. (default true)
+ --transaction_limit_by_subcomponent Include CallerID.subcomponent when considering who the user is for the purpose of transaction limit.
+ --transaction_limit_by_username Include VTGateCallerID.username when considering who the user is for the purpose of transaction limit. (default true)
+ --transaction_limit_per_user float Maximum number of transactions a single user is allowed to use at any time, represented as fraction of -transaction_cap. (default 0.4)
+ --transaction_mode string SINGLE: disallow multi-db transactions, MULTI: allow multi-db transactions with best effort commit, TWOPC: allow multi-db transactions with 2pc commit (default "MULTI")
+ --truncate-error-len int truncate errors sent to client if they are longer than this value (0 means do not truncate)
+ --twopc_abandon_age float time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved.
+ --twopc_coordinator_address string address of the (VTGate) process(es) that will be used to notify of abandoned transactions.
+ --twopc_enable if the flag is on, 2pc is enabled. Other 2pc flags must be supplied.
+ --tx-throttler-config string Synonym to -tx_throttler_config (default "target_replication_lag_sec:2 max_replication_lag_sec:10 initial_rate:100 max_increase:1 emergency_decrease:0.5 min_duration_between_increases_sec:40 max_duration_between_increases_sec:62 min_duration_between_decreases_sec:20 spread_backlog_across_sec:20 age_bad_rate_after_sec:180 bad_rate_increase:0.1 max_rate_approach_threshold:0.9")
+ --tx-throttler-default-priority int Default priority assigned to queries that lack priority information (default 100)
+ --tx-throttler-dry-run If present, the transaction throttler only records metrics about requests received and throttled, but does not actually throttle any requests.
+ --tx-throttler-healthcheck-cells strings Synonym to -tx_throttler_healthcheck_cells
+ --tx-throttler-tablet-types strings A comma-separated list of tablet types. Only tablets of this type are monitored for replication lag by the transaction throttler. Supported types are replica and/or rdonly. (default replica)
+ --tx-throttler-topo-refresh-interval duration The rate that the transaction throttler will refresh the topology to find cells. (default 5m0s)
+ --tx_throttler_config string The configuration of the transaction throttler as a text-formatted throttlerdata.Configuration protocol buffer message. (default "target_replication_lag_sec:2 max_replication_lag_sec:10 initial_rate:100 max_increase:1 emergency_decrease:0.5 min_duration_between_increases_sec:40 max_duration_between_increases_sec:62 min_duration_between_decreases_sec:20 spread_backlog_across_sec:20 age_bad_rate_after_sec:180 bad_rate_increase:0.1 max_rate_approach_threshold:0.9")
+ --tx_throttler_healthcheck_cells strings A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.
+ --unhealthy_threshold duration replication lag after which a replica is considered unhealthy (default 2h0m0s)
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --vreplication-parallel-insert-workers int Number of parallel insertion workers to use during copy phase. Set <= 1 to disable parallelism, or > 1 to enable concurrent insertion during copy phase. (default 1)
+ --vreplication_copy_phase_duration duration Duration for each copy phase loop (before running the next catchup: default 1h) (default 1h0m0s)
+ --vreplication_copy_phase_max_innodb_history_list_length int The maximum InnoDB transaction history that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 1000000)
+ --vreplication_copy_phase_max_mysql_replication_lag int The maximum MySQL replication lag (in seconds) that can exist on a vstreamer (source) before starting another round of copying rows. This helps to limit the impact on the source tablet. (default 43200)
+ --vreplication_healthcheck_retry_delay duration healthcheck retry delay (default 5s)
+ --vreplication_healthcheck_timeout duration healthcheck timeout (default 1m0s)
+ --vreplication_healthcheck_topology_refresh duration refresh interval for re-reading the topology (default 30s)
+ --vreplication_heartbeat_update_interval int Frequency (in seconds, default 1, max 60) at which the time_updated column of a vreplication stream is updated when idling (default 1)
+ --vreplication_max_time_to_retry_on_error duration stop automatically retrying when we've had consecutive failures with the same error for this long after the first occurrence
+ --vreplication_replica_lag_tolerance duration Replica lag threshold duration: once lag is below this we switch from copy phase to the replication (streaming) phase (default 1m0s)
+ --vreplication_retry_delay duration delay before retrying a failed workflow event in the replication phase (default 5s)
+ --vreplication_store_compressed_gtid Store compressed gtids in the pos column of the sidecar database's vreplication table
+ --vreplication_tablet_type string comma separated list of tablet types used as a source (default "in_order:REPLICA,PRIMARY")
+ --vschema-persistence-dir string If set, per-keyspace vschema will be persisted in this directory and reloaded into the in-memory topology server across restarts. Bookkeeping is performed using a simple watcher goroutine. This is useful when running vtcombo as an application development container (e.g. vttestserver) where you want to keep the same vschema even if developer's machine reboots. This works in tandem with vttestserver's --persistent_mode flag. Needless to say, this is neither a perfect nor a production solution for vschema persistence. Consider using the --external_topo_server flag if you require a more complete solution. This flag is ignored if --external_topo_server is set.
+ --vschema_ddl_authorized_users string List of users authorized to execute vschema ddl operations, or '%' to allow all users.
+ --vstream-binlog-rotation-threshold int Byte size at which a VStreamer will attempt to rotate the source's open binary log before starting a GTID snapshot based stream (e.g. a ResultStreamer or RowStreamer) (default 67108864)
+ --vstream_dynamic_packet_size Enable dynamic packet sizing for VReplication. This will adjust the packet size during replication to improve performance. (default true)
+ --vstream_packet_size int Suggested packet size for VReplication streamer. This is used only as a recommendation. The actual packet size may be more or less than this amount. (default 250000)
+ --vtctld_sanitize_log_messages When true, vtctld sanitizes logging.
+ --vtgate-config-terse-errors prevent bind vars from escaping in returned errors
+ --vtgate_grpc_ca string the server ca to use to validate servers when connecting
+ --vtgate_grpc_cert string the cert to use to connect
+ --vtgate_grpc_crl string the server crl to use to validate server certificates when connecting
+ --vtgate_grpc_key string the key to use to connect
+ --vtgate_grpc_server_name string the server name to use to validate server certificate
+ --vttablet_skip_buildinfo_tags string comma-separated list of buildinfo tags to skip from merging with --init_tags. each tag is either an exact match or a regular expression of the form '/regexp/'. (default "/.*/")
+ --wait_for_backup_interval duration (init restore parameter) if this is greater than 0, instead of starting up empty when no backups are found, keep checking at this interval for a backup to appear
+ --warn_memory_rows int Warning threshold for in-memory results. A row count higher than this amount will cause the VtGateWarnings.ResultsExceeded counter to be incremented. (default 30000)
+ --warn_payload_size int The warning threshold for query payloads in bytes. A payload greater than this threshold will cause the VtGateWarnings.WarnPayloadSizeExceeded counter to be incremented.
+ --warn_sharded_only If any features that are only available in unsharded mode are used, query execution warnings will be added to the session
+ --watch_replication_stream When enabled, vttablet will stream the MySQL replication stream from the local server, and use it to update schema when it sees a DDL.
+ --xbstream_restore_flags string Flags to pass to xbstream command during restore. These should be space separated and will be added to the end of the command. These need to match the ones used for backup e.g. --compress / --decompress, --encrypt / --decrypt
+ --xtrabackup_backup_flags string Flags to pass to backup command. These should be space separated and will be added to the end of the command
+ --xtrabackup_prepare_flags string Flags to pass to prepare command. These should be space separated and will be added to the end of the command
+ --xtrabackup_root_path string Directory location of the xtrabackup and xbstream executables, e.g., /usr/bin
+ --xtrabackup_stream_mode string Which mode to use if streaming, valid values are tar and xbstream. Please note that tar is not supported in XtraBackup 8.0 (default "tar")
+ --xtrabackup_stripe_block_size uint Size in bytes of each block that gets sent to a given stripe before rotating to the next stripe (default 102400)
+ --xtrabackup_stripes uint If greater than 0, use data striping across this many destination files to parallelize data transfer and decompression
+ --xtrabackup_user string User that xtrabackup will use to connect to the database server. This user must have all necessary privileges. For details, please refer to xtrabackup documentation.
diff --git a/go/flags/endtoend/vtctld.txt b/go/flags/endtoend/vtctld.txt
index 0c3c9f07c3b..eb83fc80a7b 100644
--- a/go/flags/endtoend/vtctld.txt
+++ b/go/flags/endtoend/vtctld.txt
@@ -1,4 +1,26 @@
-Usage of vtctld:
+vtctld provides web and gRPC interfaces to manage a single Vitess cluster.
+It is usually the first Vitess component to be started after a valid global topology service has been created.
+
+For the last several releases, vtctld has been transitioning to a newer gRPC service for well-typed cluster management requests.
+This is **required** to use programs such as vtadmin and vtctldclient, and the old API and service are deprecated and will be removed in a future release.
+To enable this newer service, include "grpc-vtctld" in the --service_map argument.
+This is demonstrated in the example usage below.
+
+Usage:
+ vtctld [flags]
+
+Examples:
+vtctld \
+ --topo_implementation etcd2 \
+ --topo_global_server_address localhost:2379 \
+ --topo_global_root /vitess/ \
+ --service_map 'grpc-vtctl,grpc-vtctld' \
+ --backup_storage_implementation file \
+ --file_backup_storage_root $VTDATAROOT/backups \
+ --port 15000 \
+ --grpc_port 15999
+
+Flags:
--action_timeout duration time to wait for an action before resorting to force (default 1m0s)
--alsologtostderr log to standard error as well as files
--azblob_backup_account_key_file string Path to a file containing the Azure Storage account key; if this flag is unset, the environment variable VT_AZBLOB_ACCOUNT_KEY will be used as the key itself (NOT a file path).
@@ -12,6 +34,7 @@ Usage of vtctld:
--backup_storage_compress if set, the backup files will be compressed. (default true)
--backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups.
--backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, in parallel, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
+ --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system.
--builtinbackup-file-read-buffer-size uint read files using an IO buffer of this many bytes. Golang defaults are used when set to 0.
--builtinbackup-file-write-buffer-size uint write files using an IO buffer of this many bytes. Golang defaults are used when set to 0. (default 2097152)
--builtinbackup_mysqld_timeout duration how long to wait for mysqld to shutdown at the start of the backup. (default 10m0s)
@@ -37,6 +60,7 @@ Usage of vtctld:
--grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
--grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
--grpc_auth_static_password_file string JSON File to read the users/passwords from.
+ --grpc_bind_address string Bind address for gRPC calls. If empty, listen on all addresses.
--grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
--grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
--grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
@@ -58,7 +82,7 @@ Usage of vtctld:
--grpc_server_initial_window_size int gRPC server initial window size
--grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
--grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
- -h, --help display usage and exit
+ -h, --help help for vtctld
--jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
@@ -136,9 +160,6 @@ Usage of vtctld:
--topo_global_root string the path of the global topology data in the global topology server
--topo_global_server_address string the address of the global topology server
--topo_implementation string the topology implementation to use
- --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config
- --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
- --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config
--topo_read_concurrency int Concurrency of topo reads. (default 32)
--topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be &lt;scheme&gt;:&lt;auth&gt;, e.g., digest:user:pass
--topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
diff --git a/go/flags/endtoend/vtctldclient.txt b/go/flags/endtoend/vtctldclient.txt
index 0be37194bc0..74c74b93cb0 100644
--- a/go/flags/endtoend/vtctldclient.txt
+++ b/go/flags/endtoend/vtctldclient.txt
@@ -51,6 +51,12 @@ Available Commands:
GetVSchema Prints a JSON representation of a keyspace's topo record.
GetWorkflows Gets all vreplication workflows (Reshard, MoveTables, etc) in the given keyspace.
LegacyVtctlCommand Invoke a legacy vtctlclient command. Flag parsing is best effort.
+ LookupVindex Perform commands related to creating, backfilling, and externalizing Lookup Vindexes using VReplication workflows.
+ Materialize Perform commands related to materializing query results from the source keyspace into tables in the target keyspace.
+ Migrate Migrate is used to import data from an external cluster into the current cluster.
+ Mount Mount is used to link an external Vitess cluster in order to migrate data from it.
+ MoveTables Perform commands related to moving tables from a source keyspace to a target keyspace.
+ OnlineDDL Operates on online DDL (schema migrations).
PingTablet Checks that the specified tablet is awake and responding to RPCs. This command can be blocked by other in-flight operations.
PlannedReparentShard Reparents the shard to a new primary, or away from an old primary. Both the old and new primaries must be up and running.
RebuildKeyspaceGraph Rebuilds the serving data for the keyspace(s). This command may trigger an update to all connected clients.
@@ -64,6 +70,7 @@ Available Commands:
RemoveKeyspaceCell Removes the specified cell from the Cells list for all shards in the specified keyspace (by calling RemoveShardCell on every shard). It also removes the SrvKeyspace for that keyspace in that cell.
RemoveShardCell Remove the specified cell from the specified shard's Cells list.
ReparentTablet Reparent a tablet to the current primary in the shard.
+ Reshard Perform commands related to resharding a keyspace.
RestoreFromBackup Stops mysqld on the specified tablet and restores the data from either the latest backup or closest before `backup-timestamp`.
RunHealthCheck Runs a healthcheck on the remote tablet.
SetKeyspaceDurabilityPolicy Sets the durability-policy used by the specified keyspace.
@@ -81,19 +88,21 @@ Available Commands:
UpdateCellInfo Updates the content of a CellInfo with the provided parameters, creating the CellInfo if it does not exist.
UpdateCellsAlias Updates the content of a CellsAlias with the provided parameters, creating the CellsAlias if it does not exist.
UpdateThrottlerConfig Update the tablet throttler configuration for all tablets in the given keyspace (across all cells)
+ VDiff Perform commands related to diffing tables involved in a VReplication workflow between the source and target.
Validate Validates that all nodes reachable from the global replication graph, as well as all tablets in discoverable cells, are consistent.
ValidateKeyspace Validates that all nodes reachable from the specified keyspace are consistent.
ValidateSchemaKeyspace Validates that the schema on the primary tablet for shard 0 matches the schema on all other tablets in the keyspace.
ValidateShard Validates that all nodes reachable from the specified shard are consistent.
ValidateVersionKeyspace Validates that the version on the primary tablet of shard 0 matches all of the other tablets in the keyspace.
ValidateVersionShard Validates that the version on the primary matches all of the replicas.
+ Workflow Administer VReplication workflows (Reshard, MoveTables, etc) in the given keyspace.
completion Generate the autocompletion script for the specified shell
help Help about any command
- workflow Administer VReplication workflows (Reshard, MoveTables, etc) in the given keyspace
Flags:
- --action_timeout duration timeout for the total command (default 1h0m0s)
+ --action_timeout duration timeout to use for the command (default 1h0m0s)
--alsologtostderr log to standard error as well as files
+ --compact use compact format for otherwise verbose outputs
--grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
--grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
--grpc_enable_tracing Enable gRPC tracing.
@@ -113,7 +122,7 @@ Flags:
--mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess")
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
- --server string server to use for connection (required)
+ --server string server to use for the connection (required)
--stderrthreshold severity logs at or above this threshold go to stderr (default 1)
-v, --v Level log level for V logs
--version version for vtctldclient
diff --git a/go/flags/endtoend/vtexplain.txt b/go/flags/endtoend/vtexplain.txt
index dff8d2c2bbe..d9c1ded1538 100644
--- a/go/flags/endtoend/vtexplain.txt
+++ b/go/flags/endtoend/vtexplain.txt
@@ -1,4 +1,43 @@
-Usage of vtexplain:
+vtexplain is a command line tool which provides information on how Vitess plans to execute a particular query.
+
+It can be used to validate queries for compatibility with Vitess.
+
+For a user guide that describes how to use the vtexplain tool to explain how Vitess executes a particular SQL statement, see Analyzing a SQL statement.
+
+## Limitations
+
+### The VSchema must use a keyspace name.
+
+VTExplain requires a keyspace name for each keyspace in an input VSchema:
+```
+"keyspace_name": {
+ "_comment": "Keyspace definition goes here."
+}
+```
+
+If no keyspace name is present, VTExplain will return the following error:
+```
+ERROR: initVtgateExecutor: json: cannot unmarshal bool into Go value of type map[string]json.RawMessage
+```
+
+Usage:
+ vtexplain [flags]
+
+Examples:
+Explain how Vitess will execute the query `SELECT * FROM users` using the VSchema contained in `vschemas.json` and the database schema `schema.sql`:
+
+```
+vtexplain --vschema-file vschema.json --schema-file schema.sql --sql "SELECT * FROM users"
+```
+
+Explain how the example will execute on 128 shards using Row-based replication:
+
+```
+vtexplain -- -shards 128 --vschema-file vschema.json --schema-file schema.sql --replication-mode "ROW" --output-mode text --sql "INSERT INTO users (user_id, name) VALUES(1, 'john')"
+```
+
+
+Flags:
--alsologtostderr log to standard error as well as files
--batch-interval duration Interval between logical time slots. (default 10ms)
--config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored.
@@ -10,7 +49,7 @@ Usage of vtexplain:
--dbname string Optional database target to override normal routing
--default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. (default PRIMARY)
--execution-mode string The execution mode to simulate -- must be set to multi, legacy-autocommit, or twopc (default "multi")
- -h, --help display usage and exit
+ -h, --help help for vtexplain
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
--ks-shard-map string JSON map of keyspace name -> shard name -> ShardReference object. The inner map is the same as the output of FindAllShardsInKeyspace
@@ -24,7 +63,7 @@ Usage of vtexplain:
--mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess")
--normalize Whether to enable vtgate normalization
--output-mode string Output in human-friendly text or json (default "text")
- --planner-version string Sets the query planner version to use when generating the explain output. Valid values are V3 and Gen4. An empty value will use VTGate's default planner
+ --planner-version string Sets the default planner to use. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right
--pprof strings enable profiling
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
--replication-mode string The replication mode to simulate -- must be set to either ROW or STATEMENT (default "ROW")
diff --git a/go/flags/endtoend/vtgate.txt b/go/flags/endtoend/vtgate.txt
index 9240ee0682b..0611a387abd 100644
--- a/go/flags/endtoend/vtgate.txt
+++ b/go/flags/endtoend/vtgate.txt
@@ -1,8 +1,35 @@
-Usage of vtgate:
+VTGate is a stateless proxy responsible for accepting requests from applications and routing them to the appropriate tablet server(s) for query execution. It speaks both the MySQL Protocol and a gRPC protocol.
+
+### Key Options
+
+* `--srv_topo_cache_ttl`: There may be instances where you will need to increase the cached TTL from the default of 1 second to a higher number:
+ * You may want to increase this option if you see that your topo leader goes down and keeps your queries waiting for a few seconds.
+
+Usage:
+ vtgate [flags]
+
+Examples:
+vtgate \
+ --topo_implementation etcd2 \
+ --topo_global_server_address localhost:2379 \
+ --topo_global_root /vitess/global \
+ --log_dir $VTDATAROOT/tmp \
+ --port 15001 \
+ --grpc_port 15991 \
+ --mysql_server_port 15306 \
+ --cell test \
+ --cells_to_watch test \
+ --tablet_types_to_wait PRIMARY,REPLICA \
+ --service_map 'grpc-vtgateservice' \
+ --pid_file $VTDATAROOT/tmp/vtgate.pid \
+ --mysql_auth_server_impl none
+
+Flags:
+ --allow-kill-statement Allows the execution of kill statement
--allowed_tablet_types strings Specifies the tablet types this vtgate is allowed to route queries to. Should be provided as a comma-separated set of tablet types.
--alsologtostderr log to standard error as well as files
+ --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system.
--buffer_drain_concurrency int Maximum number of requests retried simultaneously. More concurrency will increase the load on the PRIMARY vttablet when draining the buffer. (default 1)
- --buffer_implementation string Allowed values: healthcheck (legacy implementation), keyspace_events (default) (default "keyspace_events")
--buffer_keyspace_shards string If not empty, limit buffering to these entries (comma separated). Entry format: keyspace or keyspace/shard. Requires --enable_buffer=true.
--buffer_max_failover_duration duration Stop buffering completely if a failover takes longer than this duration. (default 20s)
--buffer_min_time_between_failovers duration Minimum time between the end of a failover and the start of the next one (tracked per shard). Faster consecutive failovers will not trigger buffering. (default 1m0s)
@@ -35,16 +62,16 @@ Usage of vtgate:
--enable_set_var This will enable the use of MySQL's SET_VAR query hint for certain system variables instead of using reserved connections (default true)
--enable_system_settings This will enable the system settings to be changed per session at the database connection level (default true)
--foreign_key_mode string This is to provide how to handle foreign key constraint in create/alter table. Valid values are: allow, disallow (default "allow")
- --gate_query_cache_lfu gate server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true)
--gate_query_cache_memory int gate server query cache size in bytes, maximum amount of memory to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
- --gate_query_cache_size int gate server query cache size, maximum number of queries to be cached. vtgate analyzes every incoming query and generate a query plan, these plans are being cached in a cache. This config controls the expected amount of unique entries in the cache. (default 5000)
--gateway_initial_tablet_timeout duration At startup, the tabletGateway will wait up to this duration to get at least one tablet per keyspace/shard/tablet type (default 30s)
+ --grpc-send-session-in-streaming If set, will send the session as last packet in streaming api to support transactions in streaming
--grpc-use-effective-groups If set, and SSL is not used, will set the immediate caller's security groups from the effective caller id's groups.
--grpc-use-static-authentication-callerid If set, will set the immediate caller id to the username authenticated by the static auth plugin.
--grpc_auth_mode string Which auth plugin implementation to use (eg: static)
--grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
--grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
--grpc_auth_static_password_file string JSON File to read the users/passwords from.
+ --grpc_bind_address string Bind address for gRPC calls. If empty, listen on all addresses.
--grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
--grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
--grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
@@ -69,7 +96,7 @@ Usage of vtgate:
--grpc_use_effective_callerid If set, and SSL is not used, will set the immediate caller id from the effective caller id's principal.
--healthcheck_retry_delay duration health check retry delay (default 2ms)
--healthcheck_timeout duration the health check timeout period (default 1m0s)
- -h, --help display usage and exit
+ -h, --help help for vtgate
--jaeger-agent-host string host and port to send spans to. if empty, no tracing will be done
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
@@ -90,6 +117,7 @@ Usage of vtgate:
--max_payload_size int The threshold for query payloads in bytes. A payload greater than this threshold will result in a failure to handle the query.
--message_stream_grace_period duration the amount of time to give for a vttablet to resume if it ends a message stream, usually because of a reparent. (default 30s)
--min_number_serving_vttablets int The minimum number of vttablets for each replicating tablet_type (e.g. replica, rdonly) that will be continue to be used even with replication lag above discovery_low_replication_lag, but still below discovery_high_replication_lag_minimum_serving. (default 2)
+ --mysql-server-keepalive-period duration TCP period between keep-alives
--mysql-server-pool-conn-read-buffers If set, the server will pool incoming connection read buffers
--mysql_allow_clear_text_without_tls If set, the server will allow the use of a clear text password over non-SSL connections.
--mysql_auth_server_config_file string JSON File to read the users/passwords from.
@@ -136,7 +164,7 @@ Usage of vtgate:
--onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
--opentsdb_uri string URI of opentsdb /api/put method
--pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
- --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: V3, V3Insert, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the gen4 planner and falls back to the V3 planner if the gen4 fails.
+ --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right
--port int port for the server
--pprof strings enable profiling
--proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket
@@ -150,7 +178,6 @@ Usage of vtgate:
--remote_operation_timeout duration time to wait for a remote operation (default 15s)
--retry-count int retry count (default 2)
--schema_change_signal Enable the schema tracker; requires queryserver-config-schema-change-signal to be enabled on the underlying vttablets for this to work (default true)
- --schema_change_signal_user string User to be used to send down query to vttablet to retrieve schema changes
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
--service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
--sql-max-length-errors int truncate queries in error logs to the given length (default unlimited)
@@ -192,9 +219,6 @@ Usage of vtgate:
--topo_global_root string the path of the global topology data in the global topology server
--topo_global_server_address string the address of the global topology server
--topo_implementation string the topology implementation to use
- --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config
- --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
- --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config
--topo_read_concurrency int Concurrency of topo reads. (default 32)
--topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
--topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
diff --git a/go/flags/endtoend/vtgateclienttest.txt b/go/flags/endtoend/vtgateclienttest.txt
new file mode 100644
index 00000000000..4580d4d6ce7
--- /dev/null
+++ b/go/flags/endtoend/vtgateclienttest.txt
@@ -0,0 +1,67 @@
+vtgateclienttest is a chain of vtgateservice.VTGateService implementations, each one being responsible for one test scenario.
+
+Usage:
+ vtgateclienttest [flags]
+
+Flags:
+ --alsologtostderr log to standard error as well as files
+ --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system.
+ --catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+ --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored.
+ --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn)
+ --config-name string Name of the config file (without extension) to search for. (default "vtconfig")
+ --config-path strings Paths to search for config files in. (default [{{ .Workdir }}])
+ --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s)
+ --config-type string Config file type (omit to infer config type from file extension).
+ --default_tablet_type topodatapb.TabletType The default tablet type to set for queries, when one is not explicitly selected. (default PRIMARY)
+ --grpc_auth_mode string Which auth plugin implementation to use (eg: static)
+ --grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
+ --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
+ --grpc_auth_static_password_file string JSON File to read the users/passwords from.
+ --grpc_bind_address string Bind address for gRPC calls. If empty, listen on all addresses.
+ --grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
+ --grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
+ --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
+ --grpc_crl string path to a certificate revocation list in PEM format, client certificates will be further verified against this file during TLS handshake
+ --grpc_enable_optional_tls enable optional TLS mode when a server accepts both TLS and plain-text connections on the same port
+ --grpc_enable_tracing Enable gRPC tracing.
+ --grpc_initial_conn_window_size int gRPC initial connection window size
+ --grpc_initial_window_size int gRPC initial window size
+ --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
+ --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
+ --grpc_key string server private key to use for gRPC connections, requires grpc_cert, enables TLS
+ --grpc_max_connection_age duration Maximum age of a client connection before GoAway is sent. (default 2562047h47m16.854775807s)
+ --grpc_max_connection_age_grace duration Additional grace period after grpc_max_connection_age, after which connections are forcibly closed. (default 2562047h47m16.854775807s)
+ --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 16777216)
+ --grpc_port int Port to listen on for gRPC calls. If zero, do not listen.
+ --grpc_prometheus Enable gRPC monitoring with Prometheus.
+ --grpc_server_ca string path to server CA in PEM format, which will be combine with server cert, return full certificate chain to clients
+ --grpc_server_initial_conn_window_size int gRPC server initial connection window size
+ --grpc_server_initial_window_size int gRPC server initial window size
+ --grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
+ --grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
+ -h, --help help for vtgateclienttest
+ --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
+ --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
+ --lameduck-period duration keep running at least this long after SIGTERM before stopping (default 50ms)
+ --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
+ --log_dir string If non-empty, write log files in this directory
+ --log_err_stacks log stack traces for errors
+ --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
+ --logtostderr log to standard error instead of files
+ --max-stack-size int configure the maximum stack size in bytes (default 67108864)
+ --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess")
+ --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s)
+ --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
+ --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
+ --port int port for the server
+ --pprof strings enable profiling
+ --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
+ --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
+ --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice
+ --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
+ --table-refresh-interval int interval in milliseconds to refresh tables in status page with refreshRequired class
+ --v Level log level for V logs
+ -v, --version print binary version
+ --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
+ --vschema_ddl_authorized_users string List of users authorized to execute vschema ddl operations, or '%' to allow all users.
diff --git a/go/flags/endtoend/vtgr.txt b/go/flags/endtoend/vtgr.txt
deleted file mode 100644
index 56698f88e23..00000000000
--- a/go/flags/endtoend/vtgr.txt
+++ /dev/null
@@ -1,93 +0,0 @@
-vtgr is deprecated and will be removed in Vitess 18. We recommend using VTOrc with semi-sync replication instead.
-Usage of vtgr:
- --abort_rebootstrap Don't allow vtgr to rebootstrap an existing group.
- --alsologtostderr log to standard error as well as files
- --clusters_to_watch strings Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80"
- --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored.
- --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn)
- --config-name string Name of the config file (without extension) to search for. (default "vtconfig")
- --config-path strings Paths to search for config files in. (default [{{ .Workdir }}])
- --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s)
- --config-type string Config file type (omit to infer config type from file extension).
- --consul_auth_static_file string JSON File to read the topos/tokens from.
- --db-credentials-file string db credentials file; send SIGHUP to reload this file
- --db-credentials-server string db credentials server type ('file' - file implementation; 'vault' - HashiCorp Vault implementation) (default "file")
- --db-credentials-vault-addr string URL to Vault server
- --db-credentials-vault-path string Vault path to credentials JSON blob, e.g.: secret/data/prod/dbcreds
- --db-credentials-vault-role-mountpoint string Vault AppRole mountpoint; can also be passed using VAULT_MOUNTPOINT environment variable (default "approle")
- --db-credentials-vault-role-secretidfile string Path to file containing Vault AppRole secret_id; can also be passed using VAULT_SECRETID environment variable
- --db-credentials-vault-roleid string Vault AppRole id; can also be passed using VAULT_ROLEID environment variable
- --db-credentials-vault-timeout duration Timeout for vault API operations (default 10s)
- --db-credentials-vault-tls-ca string Path to CA PEM for validating Vault server certificate
- --db-credentials-vault-tokenfile string Path to file containing Vault auth token; token can also be passed using VAULT_TOKEN environment variable
- --db-credentials-vault-ttl duration How long to cache DB credentials from the Vault server (default 30m0s)
- --db_config string Full path to db config file that will be used by VTGR.
- --db_flavor string MySQL flavor override. (default "MySQL56")
- --db_port int Local mysql port, set this to enable local fast check.
- --emit_stats If set, emit stats to push-based monitoring and stats backends
- --enable_heartbeat_check Enable heartbeat checking, set together with --group_heartbeat_threshold.
- --gr_port int Port to bootstrap a MySQL group. (default 33061)
- --group_heartbeat_threshold int VTGR will trigger backoff on inconsistent state if the group heartbeat staleness exceeds this threshold (in seconds). Should be used along with --enable_heartbeat_check.
- --grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
- --grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
- --grpc_enable_tracing Enable gRPC tracing.
- --grpc_initial_conn_window_size int gRPC initial connection window size
- --grpc_initial_window_size int gRPC initial window size
- --grpc_keepalive_time duration After a duration of this time, if the client doesn't see any activity, it pings the server to see if the transport is still alive. (default 10s)
- --grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
- --grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 268435456)
- --grpc_prometheus Enable gRPC monitoring with Prometheus.
- -h, --help display usage and exit
- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
- --lock-timeout duration Maximum time for which a shard/keyspace lock can be acquired for (default 45s)
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory
- --log_err_stacks log stack traces for errors
- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
- --logtostderr log to standard error instead of files
- --ping_tablet_timeout duration time to wait when we ping a tablet (default 2s)
- --pprof strings enable profiling
- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
- --refresh_interval duration Refresh interval to load tablets. (default 10s)
- --remote_operation_timeout duration time to wait for a remote operation (default 15s)
- --scan_interval duration Scan interval to diagnose and repair. (default 3s)
- --scan_repair_timeout duration Time to wait for a Diagnose and repair operation. (default 3s)
- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
- --stats_backend string The name of the registered push-based monitoring/stats backend to use
- --stats_combine_dimensions string List of dimensions to be combined into a single "all" value in exported stats vars
- --stats_common_tags strings Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2
- --stats_drop_variables string Variables to be dropped from the list of exported variables.
- --stats_emit_period duration Interval between emitting stats to all registered backends (default 1m0s)
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
- --tablet_manager_grpc_ca string the server ca to use to validate servers when connecting
- --tablet_manager_grpc_cert string the cert to use to connect
- --tablet_manager_grpc_concurrency int concurrency to use to talk to a vttablet server for performance-sensitive RPCs (like ExecuteFetchAs{Dba,AllPrivs,App}) (default 8)
- --tablet_manager_grpc_connpool_size int number of tablets to keep tmclient connections open to (default 100)
- --tablet_manager_grpc_crl string the server crl to use to validate server certificates when connecting
- --tablet_manager_grpc_key string the key to use to connect
- --tablet_manager_grpc_server_name string the server name to use to validate server certificate
- --tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
- --topo_consul_lock_delay duration LockDelay for consul session. (default 15s)
- --topo_consul_lock_session_checks string List of checks for consul session. (default "serfHealth")
- --topo_consul_lock_session_ttl string TTL for consul session.
- --topo_consul_watch_poll_duration duration time of the long poll for watch queries. (default 30s)
- --topo_etcd_lease_ttl int Lease TTL for locks and leader election. The client will use KeepAlive to keep the lease going. (default 30)
- --topo_etcd_password string password to use to validate the server cert when connecting to the etcd topo server
- --topo_etcd_tls_ca string path to the ca to use to validate the server cert when connecting to the etcd topo server
- --topo_etcd_tls_cert string path to the client cert to use to connect to the etcd topo server, requires topo_etcd_tls_key, enables TLS
- --topo_etcd_tls_key string path to the client key to use to connect to the etcd topo server, enables TLS
- --topo_etcd_username string username to use to validate the server cert when connecting to the etcd topo server
- --topo_global_root string the path of the global topology data in the global topology server
- --topo_global_server_address string the address of the global topology server
- --topo_implementation string the topology implementation to use
- --topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
- --topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
- --topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
- --topo_zk_tls_ca string the server ca to use to validate servers when connecting to the zk topo server
- --topo_zk_tls_cert string the cert to use to connect to the zk topo server, requires topo_zk_tls_key, enables TLS
- --topo_zk_tls_key string the key to use to connect to the zk topo server, enables TLS
- --v Level log level for V logs
- -v, --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --vtgr_config string Config file for vtgr.
diff --git a/go/flags/endtoend/vtorc.txt b/go/flags/endtoend/vtorc.txt
index e8ab0515343..495df9b3e65 100644
--- a/go/flags/endtoend/vtorc.txt
+++ b/go/flags/endtoend/vtorc.txt
@@ -1,10 +1,30 @@
-Usage of vtorc:
+VTOrc is the automated fault detection and repair tool in Vitess.
+
+Usage:
+ vtorc [flags]
+
+Examples:
+vtorc \
+ --topo_implementation etcd2 \
+ --topo_global_server_address localhost:2379 \
+ --topo_global_root /vitess/global \
+ --log_dir $VTDATAROOT/tmp \
+ --port 15000 \
+ --recovery-period-block-duration "10m" \
+ --instance-poll-time "1s" \
+ --topo-information-refresh-duration "30s" \
+ --alsologtostderr
+
+Flags:
+ --allow-emergency-reparent Whether VTOrc should be allowed to run emergency reparent operation when it detects a dead primary (default true)
--alsologtostderr log to standard error as well as files
--audit-file-location string File location where the audit logs are to be stored
--audit-purge-duration duration Duration for which audit logs are held before being purged. Should be in multiples of days (default 168h0m0s)
--audit-to-backend Whether to store the audit log in the VTOrc database
--audit-to-syslog Whether to store the audit log in the syslog
+ --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system.
--catch-sigpipe catch and ignore SIGPIPE on stdout and stderr if specified
+ --change-tablets-with-errant-gtid-to-drained Whether VTOrc should be changing the type of tablets with errant GTIDs to DRAINED
--clusters_to_watch strings Comma-separated list of keyspaces or keyspace/shards that this instance will monitor and repair. Defaults to all clusters in the topology. Example: "ks1,ks2/-80"
--config string config file name
--config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored.
@@ -24,7 +44,7 @@ Usage of vtorc:
--grpc_keepalive_timeout duration After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that the connection is closed. (default 10s)
--grpc_max_message_size int Maximum allowed RPC message size. Larger messages will be rejected by gRPC with the error 'exceeding the max size'. (default 268435456)
--grpc_prometheus Enable gRPC monitoring with Prometheus.
- -h, --help display usage and exit
+ -h, --help help for vtorc
--instance-poll-time duration Timer duration on which VTOrc refreshes MySQL information (default 5s)
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
@@ -80,9 +100,6 @@ Usage of vtorc:
--topo_global_root string the path of the global topology data in the global topology server
--topo_global_server_address string the address of the global topology server
--topo_implementation string the topology implementation to use
- --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config
- --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
- --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config
--topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
--topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
--topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
diff --git a/go/flags/endtoend/vttablet.txt b/go/flags/endtoend/vttablet.txt
index 168c0222adc..2052b0f3ef4 100644
--- a/go/flags/endtoend/vttablet.txt
+++ b/go/flags/endtoend/vttablet.txt
@@ -1,4 +1,47 @@
-Usage of vttablet:
+The VTTablet server _controls_ a running MySQL server. VTTablet supports two primary types of deployments:
+
+* Managed MySQL (most common)
+* External MySQL
+
+In addition to these deployment types, a partially managed VTTablet is also possible by setting `--disable_active_reparents`.
+
+### Managed MySQL
+
+In this mode, Vitess actively manages MySQL.
+
+### External MySQL.
+
+In this mode, an external MySQL can be used such as AWS RDS, AWS Aurora, Google CloudSQL; or just an existing (vanilla) MySQL installation.
+
+See "Unmanaged Tablet" for the full guide.
+
+Even if a MySQL is external, you can still make vttablet perform some management functions. They are as follows:
+
+* `--disable_active_reparents`: If this flag is set, then any reparent or replica commands will not be allowed. These are InitShardPrimary, PlannedReparentShard, EmergencyReparentShard, and ReparentTablet. In this mode, you should use the TabletExternallyReparented command to inform vitess of the current primary.
+* `--replication_connect_retry`: This value is give to mysql when it connects a replica to the primary as the retry duration parameter.
+* `--enable_replication_reporter`: If this flag is set, then vttablet will transmit replica lag related information to the vtgates, which will allow it to balance load better. Additionally, enabling this will also cause vttablet to restart replication if it was stopped. However, it will do this only if `--disable_active_reparents` was not turned on.
+* `--heartbeat_enable` and `--heartbeat_interval duration`: cause vttablet to write heartbeats to the sidecar database. This information is also used by the replication reporter to assess replica lag.
+
+Usage:
+ vttablet [flags]
+
+Examples:
+
+vttablet \
+ --topo_implementation etcd2 \
+ --topo_global_server_address localhost:2379 \
+ --topo_global_root /vitess/ \
+ --tablet-path $alias \
+ --init_keyspace $keyspace \
+ --init_shard $shard \
+ --init_tablet_type $tablet_type \
+ --port $port \
+ --grpc_port $grpc_port \
+ --service_map 'grpc-queryservice,grpc-tabletmanager,grpc-updatestream'
+
+`$alias` needs to be of the form: `-id`, and the cell should match one of the local cells that was created in the topology. The id can be left padded with zeroes: `cell-100` and `cell-000000100` are synonymous.
+
+Flags:
--alsologtostderr log to standard error as well as files
--app_idle_timeout duration Idle timeout for app connections (default 1m0s)
--app_pool_size int Size of the connection pool for app connections (default 40)
@@ -13,6 +56,7 @@ Usage of vttablet:
--backup_storage_compress if set, the backup files will be compressed. (default true)
--backup_storage_implementation string Which backup storage implementation to use for creating and restoring backups.
--backup_storage_number_blocks int if backup_storage_compress is true, backup_storage_number_blocks sets the number of blocks that can be processed, in parallel, before the writer blocks, during compression (default is 2). It should be equal to the number of CPUs available for compression. (default 2)
+ --bind-address string Bind address for the server. If empty, the server will listen on all available unicast and anycast IP addresses of the local system.
--binlog_host string PITR restore parameter: hostname/IP of binlog server.
--binlog_password string PITR restore parameter: password of binlog server.
--binlog_player_grpc_ca string the server ca to use to validate servers when connecting
@@ -100,14 +144,12 @@ Usage of vttablet:
--emit_stats If set, emit stats to push-based monitoring and stats backends
--enable-consolidator Synonym to -enable_consolidator (default true)
--enable-consolidator-replicas Synonym to -enable_consolidator_replicas
- --enable-lag-throttler Synonym to -enable_lag_throttler
--enable-per-workload-table-metrics If true, query counts and query error metrics include a label that identifies the workload
--enable-tx-throttler Synonym to -enable_tx_throttler
--enable_consolidator This option enables the query consolidator. (default true)
--enable_consolidator_replicas This option enables the query consolidator only on replicas.
--enable_hot_row_protection If true, incoming transactions for the same row (range) will be queued and cannot consume all txpool slots.
--enable_hot_row_protection_dry_run If true, hot row protection is not enforced but logs if transactions would have been queued.
- --enable_lag_throttler If true, vttablet will run a throttler service, and will implicitly enable heartbeats
--enable_replication_reporter Use polling to track replication lag.
--enable_transaction_limit If true, limit on number of transactions open at the same time will be enforced for all users. User trying to open a new transaction after exhausting their limit will receive an error immediately, regardless of whether there are available slots or not.
--enable_transaction_limit_dry_run If true, limit on number of transactions open at the same time will be tracked for all users, but not enforced.
@@ -129,6 +171,7 @@ Usage of vttablet:
--grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
--grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
--grpc_auth_static_password_file string JSON File to read the users/passwords from.
+ --grpc_bind_address string Bind address for gRPC calls. If empty, listen on all addresses.
--grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
--grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
--grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
@@ -154,7 +197,7 @@ Usage of vttablet:
--heartbeat_enable If true, vttablet records (if master) or checks (if replica) the current time of a replication heartbeat in the sidecar database's heartbeat table. The result is used to inform the serving state of the vttablet via healthchecks.
--heartbeat_interval duration How frequently to read and write replication heartbeat. (default 1s)
--heartbeat_on_demand_duration duration If non-zero, heartbeats are only written upon consumer request, and only run for up to given duration following the request. Frequent requests can keep the heartbeat running consistently; when requests are infrequent heartbeat may completely stop between requests
- -h, --help display usage and exit
+ -h, --help help for vttablet
--hot_row_protection_concurrent_transactions int Number of concurrent transactions let through to the txpool/MySQL for the same hot row. Should be > 1 to have enough 'ready' transactions in MySQL and benefit from a pipelining effect. (default 5)
--hot_row_protection_max_global_queue_size int Global queue limit across all row (ranges). Useful to prevent that the queue can grow unbounded. (default 1000)
--hot_row_protection_max_queue_size int Maximum number of BeginExecute RPCs which will be queued for the same row (range). (default 20)
@@ -227,9 +270,7 @@ Usage of vttablet:
--queryserver-config-passthrough-dmls query server pass through all dml statements without rewriting
--queryserver-config-pool-conn-max-lifetime duration query server connection max lifetime (in seconds), vttablet manages various mysql connection pools. This config means if a connection has lived at least this long, it connection will be removed from pool upon the next time it is returned to the pool. (default 0s)
--queryserver-config-pool-size int query server read pool size, connection pool is used by regular queries (non streaming, not in a transaction) (default 16)
- --queryserver-config-query-cache-lfu query server cache algorithm. when set to true, a new cache algorithm based on a TinyLFU admission policy will be used to improve cache behavior and prevent pollution from sparse queries (default true)
--queryserver-config-query-cache-memory int query server query cache size in bytes, maximum amount of memory to be used for caching. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 33554432)
- --queryserver-config-query-cache-size int query server query cache size, maximum number of queries to be cached. vttablet analyzes every incoming query and generate a query plan, these plans are being cached in a lru cache. This config controls the capacity of the lru cache. (default 5000)
--queryserver-config-query-pool-timeout duration query server query pool timeout (in seconds), it is how long vttablet waits for a connection from the query pool. If set to 0 (default) then the overall query timeout is used instead. (default 0s)
--queryserver-config-query-pool-waiter-cap int query server query pool waiter limit, this is the maximum number of queries that can be queued waiting to get a connection (default 5000)
--queryserver-config-query-timeout duration query server query timeout (in seconds), this is the query timeout in vttablet side. If a query takes more than this timeout, it will be killed. (default 30s)
@@ -255,6 +296,8 @@ Usage of vttablet:
--relay_log_max_size int Maximum buffer size (in bytes) for VReplication target buffering. If single rows are larger than this, a single row is buffered at a time. (default 250000)
--remote_operation_timeout duration time to wait for a remote operation (default 15s)
--replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s)
+ --restore-to-pos string (init incremental restore parameter) if set, run a point in time recovery that ends with the given position. This will attempt to use one full backup followed by zero or more incremental backups
+ --restore-to-timestamp string (init incremental restore parameter) if set, run a point in time recovery that restores up to the given timestamp, if possible. Given timestamp in RFC3339 format. Example: '2006-01-02T15:04:05Z07:00'
--restore_concurrency int (init restore parameter) how many concurrent files to restore at once (default 4)
--restore_from_backup (init restore parameter) will check BackupStorage for a recent backup at startup and start there
--restore_from_backup_ts string (init restore parameter) if set, restore the latest backup taken at or before this timestamp. Example: '2021-04-29.133050'
@@ -312,9 +355,6 @@ Usage of vttablet:
--tablet_manager_grpc_server_name string the server name to use to validate server certificate
--tablet_manager_protocol string Protocol to use to make tabletmanager RPCs to vttablets. (default "grpc")
--tablet_protocol string Protocol to use to make queryservice RPCs to vttablets. (default "grpc")
- --throttle_check_as_check_self Should throttler/check return a throttler/check-self result (changes throttler behavior for writes)
- --throttle_metrics_query SELECT Override default heartbeat/lag metric. Use either SELECT (must return single row, single value) or `SHOW GLOBAL ... LIKE ...` queries. Set -throttle_metrics_threshold respectively.
- --throttle_metrics_threshold float Override default throttle threshold, respective to --throttle_metrics_query (default 1.7976931348623157e+308)
--throttle_tablet_types string Comma separated VTTablet types to be considered by the throttler. default: 'replica'. example: 'replica,rdonly'. 'replica' aways implicitly included (default "replica")
--throttle_threshold duration Replication lag threshold for default lag throttling (default 1s)
--throttler-config-via-topo When 'true', read config from topo service and ignore throttle_threshold, throttle_metrics_threshold, throttle_metrics_query, throttle_check_as_check_self (default true)
@@ -331,9 +371,6 @@ Usage of vttablet:
--topo_global_root string the path of the global topology data in the global topology server
--topo_global_server_address string the address of the global topology server
--topo_implementation string the topology implementation to use
- --topo_k8s_context string The kubeconfig context to use, overrides the 'current-context' from the config
- --topo_k8s_kubeconfig string Path to a valid kubeconfig file. When running as a k8s pod inside the same cluster you wish to use as the topo, you may omit this and the below arguments, and Vitess is capable of auto-discovering the correct values. https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#accessing-the-api-from-a-pod
- --topo_k8s_namespace string The kubernetes namespace to use for all objects. Default comes from the context or in-cluster config
--topo_zk_auth_file string auth to use when connecting to the zk topo server, file contents should be :, e.g., digest:user:pass
--topo_zk_base_timeout duration zk base timeout (see zk.Connect) (default 30s)
--topo_zk_max_concurrency int maximum number of pending requests to send to a Zookeeper server. (default 64)
@@ -356,11 +393,13 @@ Usage of vttablet:
--twopc_abandon_age float time in seconds. Any unresolved transaction older than this time will be sent to the coordinator to be resolved.
--twopc_coordinator_address string address of the (VTGate) process(es) that will be used to notify of abandoned transactions.
--twopc_enable if the flag is on, 2pc is enabled. Other 2pc flags must be supplied.
- --tx-throttler-config string Synonym to -tx_throttler_config (default "target_replication_lag_sec: 2\nmax_replication_lag_sec: 10\ninitial_rate: 100\nmax_increase: 1\nemergency_decrease: 0.5\nmin_duration_between_increases_sec: 40\nmax_duration_between_increases_sec: 62\nmin_duration_between_decreases_sec: 20\nspread_backlog_across_sec: 20\nage_bad_rate_after_sec: 180\nbad_rate_increase: 0.1\nmax_rate_approach_threshold: 0.9\n")
+ --tx-throttler-config string Synonym to -tx_throttler_config (default "target_replication_lag_sec:2 max_replication_lag_sec:10 initial_rate:100 max_increase:1 emergency_decrease:0.5 min_duration_between_increases_sec:40 max_duration_between_increases_sec:62 min_duration_between_decreases_sec:20 spread_backlog_across_sec:20 age_bad_rate_after_sec:180 bad_rate_increase:0.1 max_rate_approach_threshold:0.9")
--tx-throttler-default-priority int Default priority assigned to queries that lack priority information (default 100)
+ --tx-throttler-dry-run If present, the transaction throttler only records metrics about requests received and throttled, but does not actually throttle any requests.
--tx-throttler-healthcheck-cells strings Synonym to -tx_throttler_healthcheck_cells
--tx-throttler-tablet-types strings A comma-separated list of tablet types. Only tablets of this type are monitored for replication lag by the transaction throttler. Supported types are replica and/or rdonly. (default replica)
- --tx_throttler_config string The configuration of the transaction throttler as a text-formatted throttlerdata.Configuration protocol buffer message. (default "target_replication_lag_sec: 2\nmax_replication_lag_sec: 10\ninitial_rate: 100\nmax_increase: 1\nemergency_decrease: 0.5\nmin_duration_between_increases_sec: 40\nmax_duration_between_increases_sec: 62\nmin_duration_between_decreases_sec: 20\nspread_backlog_across_sec: 20\nage_bad_rate_after_sec: 180\nbad_rate_increase: 0.1\nmax_rate_approach_threshold: 0.9\n")
+ --tx-throttler-topo-refresh-interval duration The rate that the transaction throttler will refresh the topology to find cells. (default 5m0s)
+ --tx_throttler_config string The configuration of the transaction throttler as a text-formatted throttlerdata.Configuration protocol buffer message. (default "target_replication_lag_sec:2 max_replication_lag_sec:10 initial_rate:100 max_increase:1 emergency_decrease:0.5 min_duration_between_increases_sec:40 max_duration_between_increases_sec:62 min_duration_between_decreases_sec:20 spread_backlog_across_sec:20 age_bad_rate_after_sec:180 bad_rate_increase:0.1 max_rate_approach_threshold:0.9")
--tx_throttler_healthcheck_cells strings A comma-separated list of cells. Only tabletservers running in these cells will be monitored for replication lag by the transaction throttler.
--unhealthy_threshold duration replication lag after which a replica is considered unhealthy (default 2h0m0s)
--v Level log level for V logs
diff --git a/go/flags/endtoend/vttestserver.txt b/go/flags/endtoend/vttestserver.txt
index c93bdc4ff4f..f1b666eb93c 100644
--- a/go/flags/endtoend/vttestserver.txt
+++ b/go/flags/endtoend/vttestserver.txt
@@ -1,4 +1,9 @@
-Usage of vttestserver:
+vttestserver allows users to spawn a self-contained Vitess server for local testing/CI.
+
+Usage:
+ vttestserver [flags]
+
+Flags:
--alsologtostderr log to standard error as well as files
--app_idle_timeout duration Idle timeout for app connections (default 1m0s)
--app_pool_size int Size of the connection pool for app connections (default 40)
@@ -42,6 +47,7 @@ Usage of vttestserver:
--grpc_auth_mtls_allowed_substrings string List of substrings of at least one of the client certificate names (separated by colon).
--grpc_auth_static_client_creds string When using grpc_static_auth in the server, this file provides the credentials to use to authenticate with server.
--grpc_auth_static_password_file string JSON File to read the users/passwords from.
+ --grpc_bind_address string Bind address for gRPC calls. If empty, listen on all addresses.
--grpc_ca string server CA to use for gRPC connections, requires TLS, and enforces client certificate check
--grpc_cert string server certificate to use for gRPC connections, requires grpc_key, enables TLS
--grpc_compression string Which protocol to use for compressing gRPC. Default: nothing. Supported: snappy
@@ -63,7 +69,7 @@ Usage of vttestserver:
--grpc_server_initial_window_size int gRPC server initial window size
--grpc_server_keepalive_enforcement_policy_min_time duration gRPC server minimum keepalive time (default 10s)
--grpc_server_keepalive_enforcement_policy_permit_without_stream gRPC server permit client keepalive pings even when there are no active streams (RPCs)
- -h, --help display usage and exit
+ -h, --help help for vttestserver
--initialize_with_random_data If this flag is each table-shard will be initialized with random data. See also the 'rng_seed' and 'min_shard_size' and 'max_shard_size' flags.
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
@@ -87,9 +93,9 @@ Usage of vttestserver:
--num_shards strings Comma separated shard count (one per keyspace) (default [2])
--onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s)
--onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s)
- --persistent_mode If this flag is set, the MySQL data directory is not cleaned up when LocalCluster.TearDown() is called. This is useful for running vttestserver as a database container in local developer environments. Note that db migration files (--schema_dir option) and seeding of random data (--initialize_with_random_data option) will only run during cluster startup if the data directory does not already exist. vschema migrations are run every time the cluster starts, since persistence for the topology server has not been implemented yet
+ --persistent_mode If this flag is set, the MySQL data directory is not cleaned up when LocalCluster.TearDown() is called. This is useful for running vttestserver as a database container in local developer environments. Note that db migration files (--schema_dir option) and seeding of random data (--initialize_with_random_data option) will only run during cluster startup if the data directory does not already exist. Changes to VSchema are persisted across cluster restarts using a simple watcher if the --data_dir argument is specified.
--pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown.
- --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: V3, V3Insert, Gen4, Gen4Greedy and Gen4Fallback. Gen4Fallback tries the new gen4 planner and falls back to the V3 planner if the gen4 fails.
+ --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right
--pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled)
--port int Port to use for vtcombo. If this is 0, a random port will be chosen.
--pprof strings enable profiling
diff --git a/go/flags/endtoend/zk.txt b/go/flags/endtoend/zk.txt
index 443bf0b9ca2..add1b6b6803 100644
--- a/go/flags/endtoend/zk.txt
+++ b/go/flags/endtoend/zk.txt
@@ -1,8 +1,41 @@
-Usage of zk:
- -h, --help display usage and exit
+zk is a tool for wrangling zookeeper.
+
+It tries to mimic unix file system commands wherever possible, but
+there are some slight differences in flag handling.
+
+The zk tool looks for the address of the cluster in /etc/zookeeper/zk_client.conf,
+or the file specified in the ZK_CLIENT_CONFIG environment variable.
+
+The local cell may be overridden with the ZK_CLIENT_LOCAL_CELL environment
+variable.
+
+Usage:
+ zk [command]
+
+Available Commands:
+ addAuth
+ cat
+ chmod
+ completion Generate the autocompletion script for the specified shell
+ cp
+ edit Create a local copy, edit, and write changes back to cell.
+ help Help about any command
+ ls
+ rm
+ stat
+ touch Change node access time.
+ unzip
+ wait Sets a watch on the node and then waits for an event to fire.
+ watch Watches for changes to nodes and prints events as they occur.
+ zip Store a zk tree in a zip archive.
+
+Flags:
+ -h, --help help for zk
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
--log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
--security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
--server string server(s) to connect to
+
+Use "zk [command] --help" for more information about a command.
diff --git a/go/flags/endtoend/zkctl.txt b/go/flags/endtoend/zkctl.txt
index 36ddace46db..d1aea061ea5 100644
--- a/go/flags/endtoend/zkctl.txt
+++ b/go/flags/endtoend/zkctl.txt
@@ -1,4 +1,17 @@
-Usage of zkctl:
+Initializes and controls zookeeper with Vitess-specific configuration.
+
+Usage:
+ zkctl [command]
+
+Available Commands:
+ completion Generate the autocompletion script for the specified shell
+ help Help about any command
+ init Generates a new config and then starts zookeeper.
+ shutdown Terminates a zookeeper server but keeps its data dir intact.
+ start Runs an already initialized zookeeper server.
+ teardown Shuts down the zookeeper server and removes its data dir.
+
+Flags:
--alsologtostderr log to standard error as well as files
--config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored.
--config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn)
@@ -6,7 +19,7 @@ Usage of zkctl:
--config-path strings Paths to search for config files in. (default [{{ .Workdir }}])
--config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s)
--config-type string Config file type (omit to infer config type from file extension).
- -h, --help display usage and exit
+ -h, --help help for zkctl
--keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
--keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
@@ -21,4 +34,7 @@ Usage of zkctl:
-v, --version print binary version
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
--zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803")
+ --zk.extra stringArray extra config line(s) to append verbatim to config (flag can be specified more than once)
--zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname
+
+Use "zkctl [command] --help" for more information about a command.
diff --git a/go/flags/endtoend/zkctld.txt b/go/flags/endtoend/zkctld.txt
index 76f19523660..d808bd7ce67 100644
--- a/go/flags/endtoend/zkctld.txt
+++ b/go/flags/endtoend/zkctld.txt
@@ -1,25 +1,7 @@
-Usage of zkctld:
- --alsologtostderr log to standard error as well as files
- --config-file string Full path of the config file (with extension) to use. If set, --config-path, --config-type, and --config-name are ignored.
- --config-file-not-found-handling ConfigFileNotFoundHandling Behavior when a config file is not found. (Options: error, exit, ignore, warn) (default warn)
- --config-name string Name of the config file (without extension) to search for. (default "vtconfig")
- --config-path strings Paths to search for config files in. (default [{{ .Workdir }}])
- --config-persistence-min-interval duration minimum interval between persisting dynamic config changes back to disk (if no change has occurred, nothing is done). (default 1s)
- --config-type string Config file type (omit to infer config type from file extension).
- -h, --help display usage and exit
- --keep_logs duration keep logs for this long (using ctime) (zero to keep forever)
- --keep_logs_by_mtime duration keep logs for this long (using mtime) (zero to keep forever)
- --log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
- --log_dir string If non-empty, write log files in this directory
- --log_err_stacks log stack traces for errors
- --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800)
- --logtostderr log to standard error instead of files
- --pprof strings enable profiling
- --purge_logs_interval duration how often try to remove old logs (default 1h0m0s)
- --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only)
- --stderrthreshold severity logs at or above this threshold go to stderr (default 1)
- --v Level log level for V logs
- -v, --version print binary version
- --vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
- --zk.cfg string zkid@server1:leaderPort1:electionPort1:clientPort1,...) (default "6@:3801:3802:3803")
- --zk.myid uint which server do you want to be? only needed when running multiple instance on one box, otherwise myid is implied by hostname
+zkctld is a daemon that starts or initializes ZooKeeper with Vitess-specific configuration. It will stay running as long as the underlying ZooKeeper server, and will pass along SIGTERM.
+
+Usage:
+ zkctld [flags]
+
+Flags:
+ -h, --help help for zkctld
diff --git a/go/flagutil/flagutil.go b/go/flagutil/flagutil.go
index f6604295730..ebf4ccef485 100644
--- a/go/flagutil/flagutil.go
+++ b/go/flagutil/flagutil.go
@@ -189,6 +189,17 @@ func DualFormatBoolVar(fs *pflag.FlagSet, p *bool, name string, value bool, usag
}
}
+// DualFormatVar creates a flag which supports both dashes and underscores
+func DualFormatVar(fs *pflag.FlagSet, val pflag.Value, name string, usage string) {
+ dashes := strings.Replace(name, "_", "-", -1)
+ underscores := strings.Replace(name, "-", "_", -1)
+
+ fs.Var(val, underscores, usage)
+ if dashes != underscores {
+ fs.Var(val, dashes, fmt.Sprintf("Synonym to -%s", underscores))
+ }
+}
+
type Value[T any] interface {
pflag.Value
Get() T
diff --git a/go/hack/hack.go b/go/hack/hack.go
index 8b042950d1e..95bf11f5530 100644
--- a/go/hack/hack.go
+++ b/go/hack/hack.go
@@ -21,7 +21,6 @@ limitations under the License.
package hack
import (
- "reflect"
"unsafe"
)
@@ -37,10 +36,5 @@ func String(b []byte) (s string) {
// StringBytes returns the underlying bytes for a string. Modifying this byte slice
// will lead to undefined behavior.
func StringBytes(s string) []byte {
- var b []byte
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- hdr.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
- hdr.Cap = len(s)
- hdr.Len = len(s)
- return b
+ return unsafe.Slice(unsafe.StringData(s), len(s))
}
diff --git a/go/hack/runtime.go b/go/hack/runtime.go
index d1ccb699460..c80ac1d38e5 100644
--- a/go/hack/runtime.go
+++ b/go/hack/runtime.go
@@ -19,7 +19,6 @@ limitations under the License.
package hack
import (
- "reflect"
"unsafe"
)
@@ -35,8 +34,7 @@ func strhash(p unsafe.Pointer, h uintptr) uintptr
// This is an optimal hash function which takes an input seed and is potentially implemented in hardware
// for most architectures. This is the same hash function that the language's `map` uses.
func RuntimeMemhash(b []byte, seed uint64) uint64 {
- pstring := (*reflect.SliceHeader)(unsafe.Pointer(&b))
- return uint64(memhash(unsafe.Pointer(pstring.Data), uintptr(seed), uintptr(pstring.Len)))
+ return uint64(memhash(unsafe.Pointer(unsafe.SliceData(b)), uintptr(seed), uintptr(len(b))))
}
// RuntimeStrhash provides access to the Go runtime's default hash function for strings.
diff --git a/go/internal/flag/flag.go b/go/internal/flag/flag.go
index 6f087143610..ade4907e573 100644
--- a/go/internal/flag/flag.go
+++ b/go/internal/flag/flag.go
@@ -42,7 +42,7 @@ import (
//
// See VEP-4, phase 1 for details: https://github.com/vitessio/enhancements/blob/c766ea905e55409cddeb666d6073cd2ac4c9783e/veps/vep-4.md#phase-1-preparation
func Parse(fs *flag.FlagSet) {
- preventGlogVFlagFromClobberingVersionFlagShorthand(fs)
+ PreventGlogVFlagFromClobberingVersionFlagShorthand(fs)
fs.AddGoFlagSet(goflag.CommandLine)
if fs.Lookup("help") == nil {
@@ -115,7 +115,7 @@ func TrickGlog() {
//
// IMPORTANT: This must be called prior to AddGoFlagSet in both Parse and
// ParseFlagsForTest.
-func preventGlogVFlagFromClobberingVersionFlagShorthand(fs *flag.FlagSet) {
+func PreventGlogVFlagFromClobberingVersionFlagShorthand(fs *flag.FlagSet) {
// N.B. we use goflag.Lookup instead of this package's Lookup, because we
// explicitly want to check only the goflags.
if f := goflag.Lookup("v"); f != nil {
@@ -178,7 +178,7 @@ func ParseFlagsForTest() {
}
// parse remaining flags including the log-related ones like --alsologtostderr
- preventGlogVFlagFromClobberingVersionFlagShorthand(flag.CommandLine)
+ PreventGlogVFlagFromClobberingVersionFlagShorthand(flag.CommandLine)
flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
flag.Parse()
}
diff --git a/go/ioutil/timeout_closer.go b/go/ioutil/timeout_closer.go
new file mode 100644
index 00000000000..1f025fbdb44
--- /dev/null
+++ b/go/ioutil/timeout_closer.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ioutil
+
+import (
+ "context"
+ "io"
+ "time"
+)
+
+// TimeoutCloser is an io.Closer that has a timeout for executing the Close() function.
+type TimeoutCloser struct {
+ ctx context.Context
+ closer io.Closer
+ timeout time.Duration
+}
+
+func NewTimeoutCloser(ctx context.Context, closer io.Closer, timeout time.Duration) *TimeoutCloser {
+ return &TimeoutCloser{
+ ctx: ctx,
+ closer: closer,
+ timeout: timeout,
+ }
+}
+
+func (c *TimeoutCloser) Close() error {
+ done := make(chan error)
+
+ ctx, cancel := context.WithTimeout(c.ctx, c.timeout)
+ defer cancel()
+
+ go func() {
+ defer close(done)
+ select {
+ case done <- c.closer.Close():
+ case <-ctx.Done():
+ }
+ }()
+ select {
+ case err := <-done:
+ return err
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+}
diff --git a/go/ioutil/timeout_closer_test.go b/go/ioutil/timeout_closer_test.go
new file mode 100644
index 00000000000..9aabe307c85
--- /dev/null
+++ b/go/ioutil/timeout_closer_test.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ioutil
+
+import (
+ "context"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type hangCloser struct {
+ hang bool
+}
+
+func (c hangCloser) Close() error {
+ if c.hang {
+ ch := make(chan bool)
+ ch <- true // hang forever
+ }
+ return nil
+}
+
+func TestTimeoutCloser(t *testing.T) {
+ ctx := context.Background()
+ {
+ closer := NewTimeoutCloser(ctx, &hangCloser{hang: false}, time.Second)
+ err := closer.Close()
+ require.NoError(t, err)
+ }
+ {
+ closer := NewTimeoutCloser(ctx, &hangCloser{hang: true}, time.Second)
+ err := closer.Close()
+ require.Error(t, err)
+ assert.ErrorIs(t, err, context.DeadlineExceeded)
+ }
+}
diff --git a/go/maps2/maps.go b/go/maps2/maps.go
new file mode 100644
index 00000000000..56191bea1a7
--- /dev/null
+++ b/go/maps2/maps.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package maps2
+
+// Keys returns the keys of the map m.
+// The keys will be in an indeterminate order.
+func Keys[M ~map[K]V, K comparable, V any](m M) []K {
+ r := make([]K, 0, len(m))
+ for k := range m {
+ r = append(r, k)
+ }
+ return r
+}
+
+// Values returns the values of the map m.
+// The values will be in an indeterminate order.
+func Values[M ~map[K]V, K comparable, V any](m M) []V {
+ r := make([]V, 0, len(m))
+ for _, v := range m {
+ r = append(r, v)
+ }
+ return r
+}
diff --git a/go/mysql/auth_server.go b/go/mysql/auth_server.go
index 64ff2beaa11..a01fdc59971 100644
--- a/go/mysql/auth_server.go
+++ b/go/mysql/auth_server.go
@@ -26,6 +26,7 @@ import (
"net"
"sync"
+ "vitess.io/vitess/go/mysql/sqlerror"
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
@@ -442,7 +443,7 @@ func (n *mysqlNativePasswordAuthMethod) AllowClearTextWithoutTLS() bool {
func (n *mysqlNativePasswordAuthMethod) HandleAuthPluginData(conn *Conn, user string, serverAuthPluginData []byte, clientAuthPluginData []byte, remoteAddr net.Addr) (Getter, error) {
if serverAuthPluginData[len(serverAuthPluginData)-1] != 0x00 {
- return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
salt := serverAuthPluginData[:len(serverAuthPluginData)-1]
@@ -534,7 +535,7 @@ func (n *mysqlCachingSha2AuthMethod) AllowClearTextWithoutTLS() bool {
func (n *mysqlCachingSha2AuthMethod) HandleAuthPluginData(c *Conn, user string, serverAuthPluginData []byte, clientAuthPluginData []byte, remoteAddr net.Addr) (Getter, error) {
if serverAuthPluginData[len(serverAuthPluginData)-1] != 0x00 {
- return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
salt := serverAuthPluginData[:len(serverAuthPluginData)-1]
@@ -546,7 +547,7 @@ func (n *mysqlCachingSha2AuthMethod) HandleAuthPluginData(c *Conn, user string,
switch cacheState {
case AuthRejected:
- return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
case AuthAccepted:
// We need to write a more data packet to indicate the
// handshake completed properly. This will be followed
@@ -561,7 +562,7 @@ func (n *mysqlCachingSha2AuthMethod) HandleAuthPluginData(c *Conn, user string,
return result, nil
case AuthNeedMoreData:
if !c.TLSEnabled() && !c.IsUnixSocket() {
- return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
data, pos := c.startEphemeralPacketWithHeader(2)
@@ -577,7 +578,7 @@ func (n *mysqlCachingSha2AuthMethod) HandleAuthPluginData(c *Conn, user string,
return n.storage.UserEntryWithPassword(c, user, password, remoteAddr)
default:
// Somehow someone returned an unknown state, let's error with access denied.
- return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
}
diff --git a/go/mysql/auth_server_clientcert_test.go b/go/mysql/auth_server_clientcert_test.go
index 560be63bab6..8b0530da253 100644
--- a/go/mysql/auth_server_clientcert_test.go
+++ b/go/mysql/auth_server_clientcert_test.go
@@ -45,7 +45,7 @@ func testValidCert(t *testing.T) {
authServer := newAuthServerClientCert()
// Create the listener, so we can get its host.
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false)
+ l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0)
require.NoError(t, err, "NewListener failed: %v", err)
defer l.Close()
host := l.Addr().(*net.TCPAddr).IP.String()
@@ -114,7 +114,7 @@ func testNoCert(t *testing.T) {
authServer := newAuthServerClientCert()
// Create the listener, so we can get its host.
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false)
+ l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0)
require.NoError(t, err, "NewListener failed: %v", err)
defer l.Close()
host := l.Addr().(*net.TCPAddr).IP.String()
diff --git a/go/mysql/auth_server_config.go b/go/mysql/auth_server_config.go
index ec2edb28a44..68deee34584 100644
--- a/go/mysql/auth_server_config.go
+++ b/go/mysql/auth_server_config.go
@@ -10,6 +10,8 @@ import (
"os"
"sync"
+ "vitess.io/vitess/go/mysql/sqlerror"
+
"github.com/spf13/pflag"
"vitess.io/vitess/go/ipfilters"
@@ -249,13 +251,13 @@ func (asc *AuthServerConfig) UserEntryWithHash(conn *Conn, salt []byte, user str
entry, ok := asc.Entries[user]
asc.mu.Unlock()
if !ok {
- return &ConfigUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return &ConfigUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
if entry.MysqlNativePassword != "" {
hash, err := DecodeMysqlNativePasswordHex(entry.MysqlNativePassword)
if err != nil {
- return &ConfigUserData{username: entry.UserData, groups: entry.Groups}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return &ConfigUserData{username: entry.UserData, groups: entry.Groups}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
isPass := VerifyHashedMysqlNativePassword(authResponse, salt, hash)
if isPass {
@@ -274,17 +276,17 @@ func (asc *AuthServerConfig) UserEntryWithHash(conn *Conn, salt []byte, user str
} else if encryptFromEnt.UserData == "v_0001" {
ecnPass, err := aseV001(entry.Password, []byte("akArIfh/a28N8w=="))
if err != nil {
- return &ConfigUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v' error ses descryption", user)
+ return &ConfigUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v' error ses descryption", user)
}
ecnPassStr = ecnPass
} else {
- return &ConfigUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v' error desencryption verssion", user)
+ return &ConfigUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v' error desencryption verssion", user)
}
computedAuthResponse := ScrambleMysqlNativePassword(salt, []byte(ecnPassStr))
if !bytes.Equal(authResponse, computedAuthResponse) {
- return &ConfigUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return &ConfigUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
return &ConfigUserData{entry.UserData, entry.Groups}, nil
}
@@ -301,12 +303,12 @@ func (asc *AuthServerConfig) ValidateClearText(user, password string) (string, e
entry, ok := asc.Entries[user]
asc.mu.Unlock()
if !ok {
- return "", NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return "", sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
// Validate the password.
if entry.Password != password {
- return "", NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return "", sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
return entry.UserData, nil
@@ -319,7 +321,7 @@ func (asc *AuthServerConfig) GetPrivilege(user string) (uint16, error) {
entry, ok := asc.Entries[user]
asc.mu.Unlock()
if !ok {
- return 0, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return 0, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
return entry.Privilege, nil
}
@@ -332,7 +334,7 @@ func (asc *AuthServerConfig) GetUserKeyspaces(user string) ([]string, error) {
entry, ok := asc.Entries[user]
asc.mu.Unlock()
if !ok {
- return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
for _, v := range entry.KeySpaces {
userKeyspaces = append(userKeyspaces, v.Name)
@@ -344,7 +346,7 @@ func (asc *AuthServerConfig) GetUserKeyspaces(user string) ([]string, error) {
func (asc *AuthServerConfig) GetKeyspace(user string) ([]string, error) {
entry, ok := asc.Entries[user]
if !ok {
- return nil, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return nil, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
if len(entry.KeySpaces) == 0 {
return nil, nil
@@ -361,7 +363,7 @@ func (asc *AuthServerConfig) GetKeyspace(user string) ([]string, error) {
func (asc *AuthServerConfig) GetRoleType(user string) (int8, error) {
entry, ok := asc.Entries[user]
if !ok {
- return 0, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return 0, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
if len(entry.KeySpaces) == 0 {
return 0, nil
@@ -374,7 +376,7 @@ func (asc *AuthServerConfig) GetPassword(user string) (string, error) {
// Find the entry.
entry, ok := asc.Entries[user]
if !ok {
- return "", NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return "", sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
encryptFromEnt := asc.Entries["encrypt_version"]
@@ -385,12 +387,12 @@ func (asc *AuthServerConfig) GetPassword(user string) (string, error) {
if encryptFromEnt.UserData == "v_0001" {
ecnPass, err := aseV001(entry.Password, []byte("akArIfh/a28N8w=="))
if err != nil {
- return "", NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v' error ses descryption", user)
+ return "", sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v' error ses descryption", user)
}
return ecnPass, nil
}
- return "", NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "unsupported encrypt version for user :%v", user)
+ return "", sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "unsupported encrypt version for user :%v", user)
}
// ConfigUserData holds the username and groups
diff --git a/go/mysql/auth_server_static.go b/go/mysql/auth_server_static.go
index 15d155ce061..c8a43968625 100644
--- a/go/mysql/auth_server_static.go
+++ b/go/mysql/auth_server_static.go
@@ -29,6 +29,8 @@ import (
"github.com/spf13/pflag"
+ "vitess.io/vitess/go/mysql/sqlerror"
+
"vitess.io/vitess/go/vt/log"
"vitess.io/vitess/go/vt/servenv"
"vitess.io/vitess/go/vt/vterrors"
@@ -179,7 +181,7 @@ func (a *AuthServerStatic) UserEntryWithPassword(conn *Conn, user string, passwo
a.mu.Unlock()
if !ok {
- return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return &StaticUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
for _, entry := range entries {
@@ -188,7 +190,7 @@ func (a *AuthServerStatic) UserEntryWithPassword(conn *Conn, user string, passwo
return &StaticUserData{entry.UserData, entry.Groups}, nil
}
}
- return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return &StaticUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
// UserEntryWithHash implements password lookup based on a
@@ -199,14 +201,14 @@ func (a *AuthServerStatic) UserEntryWithHash(conn *Conn, salt []byte, user strin
a.mu.Unlock()
if !ok {
- return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return &StaticUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
for _, entry := range entries {
if entry.MysqlNativePassword != "" {
hash, err := DecodeMysqlNativePasswordHex(entry.MysqlNativePassword)
if err != nil {
- return &StaticUserData{entry.UserData, entry.Groups}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return &StaticUserData{entry.UserData, entry.Groups}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
isPass := VerifyHashedMysqlNativePassword(authResponse, salt, hash)
@@ -221,7 +223,7 @@ func (a *AuthServerStatic) UserEntryWithHash(conn *Conn, salt []byte, user strin
}
}
}
- return &StaticUserData{}, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return &StaticUserData{}, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
// UserEntryWithCacheHash implements password lookup based on a
@@ -232,7 +234,7 @@ func (a *AuthServerStatic) UserEntryWithCacheHash(conn *Conn, salt []byte, user
a.mu.Unlock()
if !ok {
- return &StaticUserData{}, AuthRejected, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return &StaticUserData{}, AuthRejected, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
for _, entry := range entries {
@@ -243,7 +245,7 @@ func (a *AuthServerStatic) UserEntryWithCacheHash(conn *Conn, salt []byte, user
return &StaticUserData{entry.UserData, entry.Groups}, AuthAccepted, nil
}
}
- return &StaticUserData{}, AuthRejected, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "Access denied for user '%v'", user)
+ return &StaticUserData{}, AuthRejected, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "Access denied for user '%v'", user)
}
// AuthMethods returns the AuthMethod instances this auth server can handle.
diff --git a/go/mysql/auth_server_static_flaky_test.go b/go/mysql/auth_server_static_flaky_test.go
index 52e8fee8ab4..12ae74e0d60 100644
--- a/go/mysql/auth_server_static_flaky_test.go
+++ b/go/mysql/auth_server_static_flaky_test.go
@@ -126,9 +126,7 @@ func TestStaticConfigHUP(t *testing.T) {
mu.Lock()
defer mu.Unlock()
// delete registered Auth server
- for auth := range authServers {
- delete(authServers, auth)
- }
+ clear(authServers)
}
func TestStaticConfigHUPWithRotation(t *testing.T) {
diff --git a/go/mysql/binlog/binlog_json_test.go b/go/mysql/binlog/binlog_json_test.go
index f6d4fe7fcf2..5652b58567e 100644
--- a/go/mysql/binlog/binlog_json_test.go
+++ b/go/mysql/binlog/binlog_json_test.go
@@ -265,7 +265,7 @@ func TestMarshalJSONToSQL(t *testing.T) {
{
name: "null",
data: []byte{},
- expected: "CAST(null as JSON)",
+ expected: "CAST(_utf8mb4'null' as JSON)",
},
{
name: `object {"a": "b"}`,
@@ -330,17 +330,17 @@ func TestMarshalJSONToSQL(t *testing.T) {
{
name: `true`,
data: []byte{4, 1},
- expected: `CAST(true as JSON)`,
+ expected: `CAST(_utf8mb4'true' as JSON)`,
},
{
name: `false`,
data: []byte{4, 2},
- expected: `CAST(false as JSON)`,
+ expected: `CAST(_utf8mb4'false' as JSON)`,
},
{
name: `null`,
data: []byte{4, 0},
- expected: `CAST(null as JSON)`,
+ expected: `CAST(_utf8mb4'null' as JSON)`,
},
{
name: `-1`,
diff --git a/go/mysql/binlog_dump.go b/go/mysql/binlog_dump.go
index 8383a590c5e..d6768056974 100644
--- a/go/mysql/binlog_dump.go
+++ b/go/mysql/binlog_dump.go
@@ -20,6 +20,7 @@ import (
"encoding/binary"
"io"
+ "vitess.io/vitess/go/mysql/replication"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
)
@@ -51,7 +52,7 @@ func (c *Conn) parseComBinlogDump(data []byte) (logFile string, binlogPos uint32
return logFile, binlogPos, nil
}
-func (c *Conn) parseComBinlogDumpGTID(data []byte) (logFile string, logPos uint64, position Position, err error) {
+func (c *Conn) parseComBinlogDumpGTID(data []byte) (logFile string, logPos uint64, position replication.Position, err error) {
// see https://dev.mysql.com/doc/internals/en/com-binlog-dump-gtid.html
pos := 1
@@ -80,7 +81,7 @@ func (c *Conn) parseComBinlogDumpGTID(data []byte) (logFile string, logPos uint6
return logFile, logPos, position, readPacketErr
}
if gtid := string(data[pos : pos+int(dataSize)]); gtid != "" {
- position, err = DecodePosition(gtid)
+ position, err = replication.DecodePosition(gtid)
if err != nil {
return logFile, logPos, position, err
}
diff --git a/go/mysql/binlog_event.go b/go/mysql/binlog_event.go
index 0e9bfc1f155..e58cb9b254c 100644
--- a/go/mysql/binlog_event.go
+++ b/go/mysql/binlog_event.go
@@ -19,6 +19,7 @@ package mysql
import (
"fmt"
+ "vitess.io/vitess/go/mysql/replication"
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
)
@@ -95,7 +96,7 @@ type BinlogEvent interface {
// GTID returns the GTID from the event, and if this event
// also serves as a BEGIN statement.
// This is only valid if IsGTID() returns true.
- GTID(BinlogFormat) (GTID, bool, error)
+ GTID(BinlogFormat) (replication.GTID, bool, error)
// Query returns a Query struct representing data from a QUERY_EVENT.
// This is only valid if IsQuery() returns true.
Query(BinlogFormat) (Query, error)
@@ -107,7 +108,7 @@ type BinlogEvent interface {
Rand(BinlogFormat) (uint64, uint64, error)
// PreviousGTIDs returns the Position from the event.
// This is only valid if IsPreviousGTIDs() returns true.
- PreviousGTIDs(BinlogFormat) (Position, error)
+ PreviousGTIDs(BinlogFormat) (replication.Position, error)
// TableID returns the table ID for a TableMap, UpdateRows,
// WriteRows or DeleteRows event.
diff --git a/go/mysql/binlog_event_filepos.go b/go/mysql/binlog_event_filepos.go
index 8cf7ea11db9..4edc4bb91ff 100644
--- a/go/mysql/binlog_event_filepos.go
+++ b/go/mysql/binlog_event_filepos.go
@@ -19,6 +19,8 @@ package mysql
import (
"encoding/binary"
"fmt"
+
+ "vitess.io/vitess/go/mysql/replication"
)
// filePosBinlogEvent wraps a raw packet buffer and provides methods to examine
@@ -38,7 +40,7 @@ func newFilePosBinlogEvent(buf []byte) *filePosBinlogEvent {
return &filePosBinlogEvent{binlogEvent: binlogEvent(buf)}
}
-func (*filePosBinlogEvent) GTID(BinlogFormat) (GTID, bool, error) {
+func (*filePosBinlogEvent) GTID(BinlogFormat) (replication.GTID, bool, error) {
return nil, false, nil
}
@@ -51,8 +53,8 @@ func (*filePosBinlogEvent) IsGTID() bool {
return false
}
-func (*filePosBinlogEvent) PreviousGTIDs(BinlogFormat) (Position, error) {
- return Position{}, fmt.Errorf("filePos should not provide PREVIOUS_GTIDS_EVENT events")
+func (*filePosBinlogEvent) PreviousGTIDs(BinlogFormat) (replication.Position, error) {
+ return replication.Position{}, fmt.Errorf("filePos should not provide PREVIOUS_GTIDS_EVENT events")
}
// StripChecksum implements BinlogEvent.StripChecksum().
@@ -213,7 +215,7 @@ func (ev filePosFakeEvent) Format() (BinlogFormat, error) {
return BinlogFormat{}, nil
}
-func (ev filePosFakeEvent) GTID(BinlogFormat) (GTID, bool, error) {
+func (ev filePosFakeEvent) GTID(BinlogFormat) (replication.GTID, bool, error) {
return nil, false, nil
}
@@ -229,8 +231,8 @@ func (ev filePosFakeEvent) Rand(BinlogFormat) (uint64, uint64, error) {
return 0, 0, nil
}
-func (ev filePosFakeEvent) PreviousGTIDs(BinlogFormat) (Position, error) {
- return Position{}, nil
+func (ev filePosFakeEvent) PreviousGTIDs(BinlogFormat) (replication.Position, error) {
+ return replication.Position{}, nil
}
func (ev filePosFakeEvent) TableID(BinlogFormat) uint64 {
@@ -270,7 +272,7 @@ func (ev filePosFakeEvent) Bytes() []byte {
// filePosGTIDEvent is a fake GTID event for filePos.
type filePosGTIDEvent struct {
filePosFakeEvent
- gtid filePosGTID
+ gtid replication.FilePosGTID
}
func newFilePosGTIDEvent(file string, pos uint32, timestamp uint32) filePosGTIDEvent {
@@ -278,9 +280,9 @@ func newFilePosGTIDEvent(file string, pos uint32, timestamp uint32) filePosGTIDE
filePosFakeEvent: filePosFakeEvent{
timestamp: timestamp,
},
- gtid: filePosGTID{
- file: file,
- pos: pos,
+ gtid: replication.FilePosGTID{
+ File: file,
+ Pos: pos,
},
}
}
@@ -293,6 +295,6 @@ func (ev filePosGTIDEvent) StripChecksum(f BinlogFormat) (BinlogEvent, []byte, e
return ev, nil, nil
}
-func (ev filePosGTIDEvent) GTID(BinlogFormat) (GTID, bool, error) {
+func (ev filePosGTIDEvent) GTID(BinlogFormat) (replication.GTID, bool, error) {
return ev.gtid, false, nil
}
diff --git a/go/mysql/binlog_event_make.go b/go/mysql/binlog_event_make.go
index 0688fa9540b..52a8c453517 100644
--- a/go/mysql/binlog_event_make.go
+++ b/go/mysql/binlog_event_make.go
@@ -19,6 +19,8 @@ package mysql
import (
"encoding/binary"
"hash/crc32"
+
+ "vitess.io/vitess/go/mysql/replication"
)
const (
@@ -292,7 +294,7 @@ func NewIntVarEvent(f BinlogFormat, s *FakeBinlogStream, typ byte, value uint64)
// NewMariaDBGTIDEvent returns a MariaDB specific GTID event.
// It ignores the Server in the gtid, instead uses the FakeBinlogStream.ServerID.
-func NewMariaDBGTIDEvent(f BinlogFormat, s *FakeBinlogStream, gtid MariadbGTID, hasBegin bool) BinlogEvent {
+func NewMariaDBGTIDEvent(f BinlogFormat, s *FakeBinlogStream, gtid replication.MariadbGTID, hasBegin bool) BinlogEvent {
length := 8 + // sequence
4 + // domain
1 // flags2
diff --git a/go/mysql/binlog_event_make_test.go b/go/mysql/binlog_event_make_test.go
index 7eb94fef848..12d8a54ff97 100644
--- a/go/mysql/binlog_event_make_test.go
+++ b/go/mysql/binlog_event_make_test.go
@@ -23,6 +23,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/mysql/replication"
+
"vitess.io/vitess/go/mysql/binlog"
binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata"
)
@@ -149,7 +151,7 @@ func TestMariadDBGTIDEVent(t *testing.T) {
s.ServerID = 0x87654321
// With built-in begin.
- event := NewMariaDBGTIDEvent(f, s, MariadbGTID{Domain: 0, Sequence: 0x123456789abcdef0}, true)
+ event := NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0x123456789abcdef0}, true)
require.True(t, event.IsValid(), "NewMariaDBGTIDEvent().IsValid() is false")
require.True(t, event.IsGTID(), "NewMariaDBGTIDEvent().IsGTID() if false")
@@ -160,7 +162,7 @@ func TestMariadDBGTIDEVent(t *testing.T) {
require.NoError(t, err, "NewMariaDBGTIDEvent().GTID() returned error: %v", err)
require.True(t, hasBegin, "NewMariaDBGTIDEvent() didn't store hasBegin properly.")
- mgtid, ok := gtid.(MariadbGTID)
+ mgtid, ok := gtid.(replication.MariadbGTID)
require.True(t, ok, "NewMariaDBGTIDEvent().GTID() returned a non-MariaDBGTID GTID")
if mgtid.Domain != 0 || mgtid.Server != 0x87654321 || mgtid.Sequence != 0x123456789abcdef0 {
@@ -168,7 +170,7 @@ func TestMariadDBGTIDEVent(t *testing.T) {
}
// Without built-in begin.
- event = NewMariaDBGTIDEvent(f, s, MariadbGTID{Domain: 0, Sequence: 0x123456789abcdef0}, false)
+ event = NewMariaDBGTIDEvent(f, s, replication.MariadbGTID{Domain: 0, Sequence: 0x123456789abcdef0}, false)
require.True(t, event.IsValid(), "NewMariaDBGTIDEvent().IsValid() is false")
require.True(t, event.IsGTID(), "NewMariaDBGTIDEvent().IsGTID() if false")
@@ -179,7 +181,7 @@ func TestMariadDBGTIDEVent(t *testing.T) {
require.NoError(t, err, "NewMariaDBGTIDEvent().GTID() returned error: %v", err)
require.False(t, hasBegin, "NewMariaDBGTIDEvent() didn't store hasBegin properly.")
- mgtid, ok = gtid.(MariadbGTID)
+ mgtid, ok = gtid.(replication.MariadbGTID)
require.True(t, ok, "NewMariaDBGTIDEvent().GTID() returned a non-MariaDBGTID GTID")
if mgtid.Domain != 0 || mgtid.Server != 0x87654321 || mgtid.Sequence != 0x123456789abcdef0 {
diff --git a/go/mysql/binlog_event_mariadb.go b/go/mysql/binlog_event_mariadb.go
index 33f858c2f36..f2c0ec8f369 100644
--- a/go/mysql/binlog_event_mariadb.go
+++ b/go/mysql/binlog_event_mariadb.go
@@ -19,6 +19,7 @@ package mysql
import (
"encoding/binary"
+ "vitess.io/vitess/go/mysql/replication"
"vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
)
@@ -59,13 +60,13 @@ func (ev mariadbBinlogEvent) IsGTID() bool {
// 8 sequence number
// 4 domain ID
// 1 flags2
-func (ev mariadbBinlogEvent) GTID(f BinlogFormat) (GTID, bool, error) {
+func (ev mariadbBinlogEvent) GTID(f BinlogFormat) (replication.GTID, bool, error) {
const FLStandalone = 1
data := ev.Bytes()[f.HeaderLength:]
flags2 := data[8+4]
- return MariadbGTID{
+ return replication.MariadbGTID{
Sequence: binary.LittleEndian.Uint64(data[:8]),
Domain: binary.LittleEndian.Uint32(data[8 : 8+4]),
Server: ev.ServerID(),
@@ -73,8 +74,8 @@ func (ev mariadbBinlogEvent) GTID(f BinlogFormat) (GTID, bool, error) {
}
// PreviousGTIDs implements BinlogEvent.PreviousGTIDs().
-func (ev mariadbBinlogEvent) PreviousGTIDs(f BinlogFormat) (Position, error) {
- return Position{}, vterrors.Errorf(vtrpc.Code_INTERNAL, "MariaDB should not provide PREVIOUS_GTIDS_EVENT events")
+func (ev mariadbBinlogEvent) PreviousGTIDs(f BinlogFormat) (replication.Position, error) {
+ return replication.Position{}, vterrors.Errorf(vtrpc.Code_INTERNAL, "MariaDB should not provide PREVIOUS_GTIDS_EVENT events")
}
// StripChecksum implements BinlogEvent.StripChecksum().
diff --git a/go/mysql/binlog_event_mariadb_test.go b/go/mysql/binlog_event_mariadb_test.go
index 1464da0e573..c4eeac39c38 100644
--- a/go/mysql/binlog_event_mariadb_test.go
+++ b/go/mysql/binlog_event_mariadb_test.go
@@ -22,6 +22,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql/replication"
)
// sample event data
@@ -99,7 +101,7 @@ func TestMariadbStandaloneBinlogEventGTID(t *testing.T) {
}
input := mariadbBinlogEvent{binlogEvent: binlogEvent(mariadbStandaloneGTIDEvent)}
- want := MariadbGTID{Domain: 0, Server: 62344, Sequence: 9}
+ want := replication.MariadbGTID{Domain: 0, Server: 62344, Sequence: 9}
got, hasBegin, err := input.GTID(f)
assert.NoError(t, err, "unexpected error: %v", err)
assert.False(t, hasBegin, "unexpected hasBegin")
@@ -115,7 +117,7 @@ func TestMariadbBinlogEventGTID(t *testing.T) {
}
input := mariadbBinlogEvent{binlogEvent: binlogEvent(mariadbBeginGTIDEvent)}
- want := MariadbGTID{Domain: 0, Server: 62344, Sequence: 10}
+ want := replication.MariadbGTID{Domain: 0, Server: 62344, Sequence: 10}
got, hasBegin, err := input.GTID(f)
assert.NoError(t, err, "unexpected error: %v", err)
assert.True(t, hasBegin, "unexpected !hasBegin")
diff --git a/go/mysql/binlog_event_mysql56.go b/go/mysql/binlog_event_mysql56.go
index 2e6cfec2dfa..3f931310ba9 100644
--- a/go/mysql/binlog_event_mysql56.go
+++ b/go/mysql/binlog_event_mysql56.go
@@ -19,6 +19,7 @@ package mysql
import (
"encoding/binary"
+ "vitess.io/vitess/go/mysql/replication"
"vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
)
@@ -59,22 +60,22 @@ func (ev mysql56BinlogEvent) IsGTID() bool {
// 1 flags
// 16 SID (server UUID)
// 8 GNO (sequence number, signed int)
-func (ev mysql56BinlogEvent) GTID(f BinlogFormat) (GTID, bool, error) {
+func (ev mysql56BinlogEvent) GTID(f BinlogFormat) (replication.GTID, bool, error) {
data := ev.Bytes()[f.HeaderLength:]
- var sid SID
+ var sid replication.SID
copy(sid[:], data[1:1+16])
gno := int64(binary.LittleEndian.Uint64(data[1+16 : 1+16+8]))
- return Mysql56GTID{Server: sid, Sequence: gno}, false /* hasBegin */, nil
+ return replication.Mysql56GTID{Server: sid, Sequence: gno}, false /* hasBegin */, nil
}
// PreviousGTIDs implements BinlogEvent.PreviousGTIDs().
-func (ev mysql56BinlogEvent) PreviousGTIDs(f BinlogFormat) (Position, error) {
+func (ev mysql56BinlogEvent) PreviousGTIDs(f BinlogFormat) (replication.Position, error) {
data := ev.Bytes()[f.HeaderLength:]
- set, err := NewMysql56GTIDSetFromSIDBlock(data)
+ set, err := replication.NewMysql56GTIDSetFromSIDBlock(data)
if err != nil {
- return Position{}, err
+ return replication.Position{}, err
}
- return Position{
+ return replication.Position{
GTIDSet: set,
}, nil
}
diff --git a/go/mysql/binlog_event_mysql56_test.go b/go/mysql/binlog_event_mysql56_test.go
index 86b58862ef9..e5fa3545278 100644
--- a/go/mysql/binlog_event_mysql56_test.go
+++ b/go/mysql/binlog_event_mysql56_test.go
@@ -23,6 +23,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql/replication"
)
// Sample event data for MySQL 5.6.
@@ -79,25 +81,14 @@ func TestMysql56GTID(t *testing.T) {
require.NoError(t, err, "StripChecksum() error: %v", err)
require.True(t, input.IsGTID(), "IsGTID() = false, want true")
- want, _ := parseMysql56GTID("439192bd-f37c-11e4-bbeb-0242ac11035a:4")
+ want := replication.Mysql56GTID{
+ Server: replication.SID{0x43, 0x91, 0x92, 0xbd, 0xf3, 0x7c, 0x11, 0xe4, 0xbb, 0xeb, 0x2, 0x42, 0xac, 0x11, 0x3, 0x5a},
+ Sequence: 4,
+ }
got, hasBegin, err := input.GTID(format)
require.NoError(t, err, "GTID() error: %v", err)
assert.False(t, hasBegin, "GTID() returned hasBegin")
assert.Equal(t, want, got, "GTID() = %#v, want %#v", got, want)
-
-}
-
-func TestMysql56ParseGTID(t *testing.T) {
- input := "00010203-0405-0607-0809-0A0B0C0D0E0F:56789"
- want := Mysql56GTID{
- Server: SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
- Sequence: 56789,
- }
-
- got, err := parseMysql56GTID(input)
- require.NoError(t, err, "unexpected error: %v", err)
- assert.Equal(t, want, got, "(&mysql56{}).ParseGTID(%#v) = %#v, want %#v", input, got, want)
-
}
func TestMysql56DecodeTransactionPayload(t *testing.T) {
@@ -148,13 +139,13 @@ func TestMysql56DecodeTransactionPayload(t *testing.T) {
func TestMysql56ParsePosition(t *testing.T) {
input := "00010203-0405-0607-0809-0a0b0c0d0e0f:1-2"
- sid := SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
- var set GTIDSet = Mysql56GTIDSet{}
- set = set.AddGTID(Mysql56GTID{Server: sid, Sequence: 1})
- set = set.AddGTID(Mysql56GTID{Server: sid, Sequence: 2})
- want := Position{GTIDSet: set}
+ sid := replication.SID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+ var set replication.GTIDSet = replication.Mysql56GTIDSet{}
+ set = set.AddGTID(replication.Mysql56GTID{Server: sid, Sequence: 1})
+ set = set.AddGTID(replication.Mysql56GTID{Server: sid, Sequence: 2})
+ want := replication.Position{GTIDSet: set}
- got, err := ParsePosition(Mysql56FlavorID, input)
+ got, err := replication.ParsePosition(replication.Mysql56FlavorID, input)
assert.NoError(t, err, "unexpected error: %v", err)
assert.True(t, got.Equal(want), "(&mysql56{}).ParsePosition(%#v) = %#v, want %#v", input, got, want)
diff --git a/go/mysql/client.go b/go/mysql/client.go
index 487f1d0fe52..f6c4e17cbfc 100644
--- a/go/mysql/client.go
+++ b/go/mysql/client.go
@@ -27,6 +27,7 @@ import (
"time"
"vitess.io/vitess/go/mysql/collations"
+ "vitess.io/vitess/go/mysql/sqlerror"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
"vitess.io/vitess/go/vt/vttls"
@@ -94,11 +95,11 @@ func Connect(ctx context.Context, params *ConnParams) (*Conn, error) {
// should return a 2003.
if netProto == "tcp" {
status <- connectResult{
- err: NewSQLError(CRConnHostError, SSUnknownSQLState, "net.Dial(%v) failed: %v", addr, err),
+ err: sqlerror.NewSQLError(sqlerror.CRConnHostError, sqlerror.SSUnknownSQLState, "net.Dial(%v) failed: %v", addr, err),
}
} else {
status <- connectResult{
- err: NewSQLError(CRConnectionError, SSUnknownSQLState, "net.Dial(%v) to local server failed: %v", addr, err),
+ err: sqlerror.NewSQLError(sqlerror.CRConnectionError, sqlerror.SSUnknownSQLState, "net.Dial(%v) to local server failed: %v", addr, err),
}
}
return
@@ -178,11 +179,11 @@ func (c *Conn) Ping() error {
data[pos] = ComPing
if err := c.writeEphemeralPacket(); err != nil {
- return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err)
}
data, err := c.readEphemeralPacket()
if err != nil {
- return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err)
}
defer c.recycleReadPacket()
switch data[0] {
@@ -207,7 +208,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error {
// Wait for the server initial handshake packet, and parse it.
data, err := c.readPacket()
if err != nil {
- return NewSQLError(CRServerLost, "", "initial packet read failed: %v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerLost, "", "initial packet read failed: %v", err)
}
capabilities, salt, err := c.parseInitialHandshakePacket(data)
if err != nil {
@@ -218,7 +219,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error {
// Sanity check.
if capabilities&CapabilityClientProtocol41 == 0 {
- return NewSQLError(CRVersionError, SSUnknownSQLState, "cannot connect to servers earlier than 4.1")
+ return sqlerror.NewSQLError(sqlerror.CRVersionError, sqlerror.SSUnknownSQLState, "cannot connect to servers earlier than 4.1")
}
// Remember a subset of the capabilities, so we can use them
@@ -238,7 +239,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error {
// If client asked for SSL, but server doesn't support it,
// stop right here.
if params.SslRequired() && capabilities&CapabilityClientSSL == 0 {
- return NewSQLError(CRSSLConnectionError, SSUnknownSQLState, "server doesn't support SSL but client asked for it")
+ return sqlerror.NewSQLError(sqlerror.CRSSLConnectionError, sqlerror.SSUnknownSQLState, "server doesn't support SSL but client asked for it")
}
// The ServerName to verify depends on what the hostname is.
@@ -259,13 +260,13 @@ func (c *Conn) clientHandshake(params *ConnParams) error {
tlsVersion, err := vttls.TLSVersionToNumber(params.TLSMinVersion)
if err != nil {
- return NewSQLError(CRSSLConnectionError, SSUnknownSQLState, "error parsing minimal TLS version: %v", err)
+ return sqlerror.NewSQLError(sqlerror.CRSSLConnectionError, sqlerror.SSUnknownSQLState, "error parsing minimal TLS version: %v", err)
}
// Build the TLS config.
clientConfig, err := vttls.ClientConfig(params.EffectiveSslMode(), params.SslCert, params.SslKey, params.SslCa, params.SslCrl, serverName, tlsVersion)
if err != nil {
- return NewSQLError(CRSSLConnectionError, SSUnknownSQLState, "error loading client cert and ca: %v", err)
+ return sqlerror.NewSQLError(sqlerror.CRSSLConnectionError, sqlerror.SSUnknownSQLState, "error loading client cert and ca: %v", err)
}
// Send the SSLRequest packet.
@@ -296,7 +297,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error {
} else if params.Flags&CapabilityClientSessionTrack == CapabilityClientSessionTrack {
// If client asked for ClientSessionTrack, but server doesn't support it,
// stop right here.
- return NewSQLError(CRSSLConnectionError, SSUnknownSQLState, "server doesn't support ClientSessionTrack but client asked for it")
+ return sqlerror.NewSQLError(sqlerror.CRSSLConnectionError, sqlerror.SSUnknownSQLState, "server doesn't support ClientSessionTrack but client asked for it")
}
// Build and send our handshake response 41.
@@ -321,7 +322,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error {
// Wait for response, should be OK.
response, err := c.readPacket()
if err != nil {
- return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err)
}
switch response[0] {
case OKPacket:
@@ -331,7 +332,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error {
return ParseErrorPacket(response)
default:
// FIXME(alainjobart) handle extra auth cases and so on.
- return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "initial server response is asking for more information, not implemented yet: %v", response)
+ return sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "initial server response is asking for more information, not implemented yet: %v", response)
}
}
@@ -346,7 +347,7 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error)
// Protocol version.
pver, pos, ok := readByte(data, pos)
if !ok {
- return 0, nil, NewSQLError(CRVersionError, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no protocol version")
+ return 0, nil, sqlerror.NewSQLError(sqlerror.CRVersionError, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no protocol version")
}
// Server is allowed to immediately send ERR packet
@@ -355,41 +356,41 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error)
// Normally there would be a 1-byte sql_state_marker field and a 5-byte
// sql_state field here, but docs say these will not be present in this case.
errorMsg, _, _ := readEOFString(data, pos)
- return 0, nil, NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "immediate error from server errorCode=%v errorMsg=%v", errorCode, errorMsg)
+ return 0, nil, sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "immediate error from server errorCode=%v errorMsg=%v", errorCode, errorMsg)
}
if pver != protocolVersion {
- return 0, nil, NewSQLError(CRVersionError, SSUnknownSQLState, "bad protocol version: %v", pver)
+ return 0, nil, sqlerror.NewSQLError(sqlerror.CRVersionError, sqlerror.SSUnknownSQLState, "bad protocol version: %v", pver)
}
// Read the server version.
c.ServerVersion, pos, ok = readNullString(data, pos)
if !ok {
- return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no server version")
+ return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no server version")
}
// Read the connection id.
c.ConnectionID, pos, ok = readUint32(data, pos)
if !ok {
- return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no connection id")
+ return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no connection id")
}
// Read the first part of the auth-plugin-data
authPluginData, pos, ok := readBytes(data, pos, 8)
if !ok {
- return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no auth-plugin-data-part-1")
+ return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no auth-plugin-data-part-1")
}
// One byte filler, 0. We don't really care about the value.
_, pos, ok = readByte(data, pos)
if !ok {
- return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no filler")
+ return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no filler")
}
// Lower 2 bytes of the capability flags.
capLower, pos, ok := readUint16(data, pos)
if !ok {
- return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no capability flags (lower 2 bytes)")
+ return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no capability flags (lower 2 bytes)")
}
var capabilities = uint32(capLower)
@@ -401,20 +402,20 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error)
// Character set.
characterSet, pos, ok := readByte(data, pos)
if !ok {
- return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no character set")
+ return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no character set")
}
c.CharacterSet = collations.ID(characterSet)
// Status flags. Ignored.
_, pos, ok = readUint16(data, pos)
if !ok {
- return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no status flags")
+ return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no status flags")
}
// Upper 2 bytes of the capability flags.
capUpper, pos, ok := readUint16(data, pos)
if !ok {
- return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no capability flags (upper 2 bytes)")
+ return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no capability flags (upper 2 bytes)")
}
capabilities += uint32(capUpper) << 16
@@ -424,13 +425,13 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error)
if capabilities&CapabilityClientPluginAuth != 0 {
authPluginDataLength, pos, ok = readByte(data, pos)
if !ok {
- return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no length of auth-plugin-data")
+ return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no length of auth-plugin-data")
}
} else {
// One byte filler, 0. We don't really care about the value.
_, pos, ok = readByte(data, pos)
if !ok {
- return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no length of auth-plugin-data filler")
+ return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no length of auth-plugin-data filler")
}
}
@@ -447,12 +448,12 @@ func (c *Conn) parseInitialHandshakePacket(data []byte) (uint32, []byte, error)
var authPluginDataPart2 []byte
authPluginDataPart2, pos, ok = readBytes(data, pos, l)
if !ok {
- return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: packet has no auth-plugin-data-part-2")
+ return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: packet has no auth-plugin-data-part-2")
}
// The last byte has to be 0, and is not part of the data.
if authPluginDataPart2[l-1] != 0 {
- return 0, nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "parseInitialHandshakePacket: auth-plugin-data-part-2 is not 0 terminated")
+ return 0, nil, sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "parseInitialHandshakePacket: auth-plugin-data-part-2 is not 0 terminated")
}
authPluginData = append(authPluginData, authPluginDataPart2[0:l-1]...)
}
@@ -510,7 +511,7 @@ func (c *Conn) writeSSLRequest(capabilities uint32, characterSet uint8, params *
// And send it as is.
if err := c.writeEphemeralPacket(); err != nil {
- return NewSQLError(CRServerLost, SSUnknownSQLState, "cannot send SSLRequest: %v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "cannot send SSLRequest: %v", err)
}
return nil
}
@@ -608,11 +609,11 @@ func (c *Conn) writeHandshakeResponse41(capabilities uint32, scrambledPassword [
// Sanity-check the length.
if pos != len(data) {
- return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "writeHandshakeResponse41: only packed %v bytes, out of %v allocated", pos, len(data))
+ return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "writeHandshakeResponse41: only packed %v bytes, out of %v allocated", pos, len(data))
}
if err := c.writeEphemeralPacket(); err != nil {
- return NewSQLError(CRServerLost, SSUnknownSQLState, "cannot send HandshakeResponse41: %v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "cannot send HandshakeResponse41: %v", err)
}
return nil
}
@@ -622,7 +623,7 @@ func (c *Conn) writeHandshakeResponse41(capabilities uint32, scrambledPassword [
func (c *Conn) handleAuthResponse(params *ConnParams) error {
response, err := c.readPacket()
if err != nil {
- return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err)
}
switch response[0] {
@@ -642,7 +643,7 @@ func (c *Conn) handleAuthResponse(params *ConnParams) error {
case ErrPacket:
return ParseErrorPacket(response)
default:
- return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "initial server response cannot be parsed: %v", response)
+ return sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "initial server response cannot be parsed: %v", response)
}
return nil
@@ -654,7 +655,7 @@ func (c *Conn) handleAuthSwitchPacket(params *ConnParams, response []byte) error
var salt []byte
c.authPluginName, salt, err = parseAuthSwitchRequest(response)
if err != nil {
- return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "cannot parse auth switch request: %v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "cannot parse auth switch request: %v", err)
}
if salt != nil {
c.salt = salt
@@ -675,7 +676,7 @@ func (c *Conn) handleAuthSwitchPacket(params *ConnParams, response []byte) error
return err
}
default:
- return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "server asked for unsupported auth method: %v", c.authPluginName)
+ return sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "server asked for unsupported auth method: %v", c.authPluginName)
}
// The response could be an OKPacket, AuthMoreDataPacket or ErrPacket
@@ -717,7 +718,7 @@ func (c *Conn) handleAuthMoreDataPacket(data byte, params *ConnParams) error {
// Next packet should either be an OKPacket or ErrPacket
return c.handleAuthResponse(params)
default:
- return NewSQLError(CRServerHandshakeErr, SSUnknownSQLState, "cannot parse AuthMoreDataPacket: %v", data)
+ return sqlerror.NewSQLError(sqlerror.CRServerHandshakeErr, sqlerror.SSUnknownSQLState, "cannot parse AuthMoreDataPacket: %v", data)
}
}
@@ -747,7 +748,7 @@ func (c *Conn) requestPublicKey() (rsaKey *rsa.PublicKey, err error) {
response, err := c.readPacket()
if err != nil {
- return nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err)
+ return nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err)
}
// Server should respond with a AuthMoreDataPacket containing the public key
diff --git a/go/mysql/client_test.go b/go/mysql/client_test.go
index 73d48df1d35..d7424b29516 100644
--- a/go/mysql/client_test.go
+++ b/go/mysql/client_test.go
@@ -32,16 +32,18 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/mysql/sqlerror"
+
"vitess.io/vitess/go/vt/tlstest"
"vitess.io/vitess/go/vt/vttls"
)
// assertSQLError makes sure we get the right error.
-func assertSQLError(t *testing.T, err error, code ErrorCode, sqlState, subtext, query, pattern string) {
+func assertSQLError(t *testing.T, err error, code sqlerror.ErrorCode, sqlState, subtext, query, pattern string) {
t.Helper()
require.Error(t, err, "was expecting SQLError %v / %v / %v but got no error.", code, sqlState, subtext)
- serr, ok := err.(*SQLError)
+ serr, ok := err.(*sqlerror.SQLError)
require.True(t, ok, "was expecting SQLError %v / %v / %v but got: %v", code, sqlState, subtext, err)
require.Equal(t, code, serr.Num, "was expecting SQLError %v / %v / %v but got code %v", code, sqlState, subtext, serr.Num)
require.Equal(t, sqlState, serr.State, "was expecting SQLError %v / %v / %v but got state %v", code, sqlState, subtext, serr.State)
@@ -110,14 +112,14 @@ func TestConnectTimeout(t *testing.T) {
}()
ctx = context.Background()
_, err = Connect(ctx, params)
- assertSQLError(t, err, CRServerLost, SSUnknownSQLState, "initial packet read failed", "", "")
+ assertSQLError(t, err, sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "initial packet read failed", "", "")
// Now close the listener. Connect should fail right away,
// check the error.
listener.Close()
wg.Wait()
_, err = Connect(ctx, params)
- assertSQLError(t, err, CRConnHostError, SSUnknownSQLState, "connection refused", "", "")
+ assertSQLError(t, err, sqlerror.CRConnHostError, sqlerror.SSUnknownSQLState, "connection refused", "", "")
// Tests a connection where Dial to a unix socket fails
// properly returns the right error. To simulate exactly the
@@ -131,7 +133,7 @@ func TestConnectTimeout(t *testing.T) {
_, err = Connect(ctx, params)
os.Remove(name)
t.Log(err)
- assertSQLError(t, err, CRConnectionError, SSUnknownSQLState, "connection refused", "", "net\\.Dial\\(([a-z0-9A-Z_\\/]*)\\) to local server failed:")
+ assertSQLError(t, err, sqlerror.CRConnectionError, sqlerror.SSUnknownSQLState, "connection refused", "", "net\\.Dial\\(([a-z0-9A-Z_\\/]*)\\) to local server failed:")
}
// TestTLSClientDisabled creates a Server with TLS support, then connects
@@ -149,7 +151,7 @@ func TestTLSClientDisabled(t *testing.T) {
// Below, we are enabling --ssl-verify-server-cert, which adds
// a check that the common name of the certificate matches the
// server host name we connect to.
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false)
+ l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0)
require.NoError(t, err)
defer l.Close()
@@ -221,7 +223,7 @@ func TestTLSClientPreferredDefault(t *testing.T) {
// Below, we are enabling --ssl-verify-server-cert, which adds
// a check that the common name of the certificate matches the
// server host name we connect to.
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false)
+ l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0)
require.NoError(t, err)
defer l.Close()
@@ -294,7 +296,7 @@ func TestTLSClientRequired(t *testing.T) {
// Below, we are enabling --ssl-verify-server-cert, which adds
// a check that the common name of the certificate matches the
// server host name we connect to.
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false)
+ l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0)
require.NoError(t, err)
defer l.Close()
@@ -341,7 +343,7 @@ func testTLSClientVerifyCA(t *testing.T) {
// Below, we are enabling --ssl-verify-server-cert, which adds
// a check that the common name of the certificate matches the
// server host name we connect to.
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false)
+ l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0)
require.NoError(t, err)
defer l.Close()
@@ -424,7 +426,7 @@ func TestTLSClientVerifyIdentity(t *testing.T) {
// Below, we are enabling --ssl-verify-server-cert, which adds
// a check that the common name of the certificate matches the
// server host name we connect to.
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false)
+ l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0)
require.NoError(t, err)
defer l.Close()
diff --git a/go/mysql/collations/charset/convert.go b/go/mysql/collations/charset/convert.go
index 3904329654c..bc51e9b8377 100644
--- a/go/mysql/collations/charset/convert.go
+++ b/go/mysql/collations/charset/convert.go
@@ -19,6 +19,8 @@ package charset
import (
"fmt"
"unicode/utf8"
+
+ "vitess.io/vitess/go/hack"
)
func failedConversionError(from, to Charset, input []byte) error {
@@ -128,6 +130,79 @@ func Convert(dst []byte, dstCharset Charset, src []byte, srcCharset Charset) ([]
}
}
+func Expand(dst []rune, src []byte, srcCharset Charset) []rune {
+ switch srcCharset := srcCharset.(type) {
+ case Charset_utf8mb3, Charset_utf8mb4:
+ if dst == nil {
+ return []rune(string(src))
+ }
+ dst = make([]rune, 0, len(src))
+ for _, cp := range string(src) {
+ dst = append(dst, cp)
+ }
+ return dst
+ case Charset_binary:
+ if dst == nil {
+ dst = make([]rune, 0, len(src))
+ }
+ for _, c := range src {
+ dst = append(dst, rune(c))
+ }
+ return dst
+ default:
+ if dst == nil {
+ dst = make([]rune, 0, len(src))
+ }
+ for len(src) > 0 {
+ cp, width := srcCharset.DecodeRune(src)
+ src = src[width:]
+ dst = append(dst, cp)
+ }
+ return dst
+ }
+}
+
+func Collapse(dst []byte, src []rune, dstCharset Charset) []byte {
+ switch dstCharset := dstCharset.(type) {
+ case Charset_utf8mb3, Charset_utf8mb4:
+ if dst == nil {
+ return hack.StringBytes(string(src))
+ }
+ return append(dst, hack.StringBytes(string(src))...)
+ case Charset_binary:
+ if dst == nil {
+ dst = make([]byte, 0, len(src))
+ }
+ for _, b := range src {
+ dst = append(dst, byte(b))
+ }
+ return dst
+ default:
+ nDst := 0
+ if dst == nil {
+ dst = make([]byte, len(src)*dstCharset.MaxWidth())
+ } else {
+ nDst = len(dst)
+ dst = dst[:cap(dst)]
+ }
+ for _, c := range src {
+ if len(dst)-nDst < 4 {
+ newDst := make([]byte, len(dst)*2)
+ copy(newDst, dst[:nDst])
+ dst = newDst
+ }
+ w := dstCharset.EncodeRune(dst[nDst:], c)
+ if w < 0 {
+ if w = dstCharset.EncodeRune(dst[nDst:], '?'); w < 0 {
+ break
+ }
+ }
+ nDst += w
+ }
+ return dst[:nDst]
+ }
+}
+
func ConvertFromUTF8(dst []byte, dstCharset Charset, src []byte) ([]byte, error) {
return Convert(dst, dstCharset, src, Charset_utf8mb4{})
}
diff --git a/go/mysql/collations/charset/korean/tables.go b/go/mysql/collations/charset/korean/tables.go
index 0480e85c4aa..7f7ad3e4264 100644
--- a/go/mysql/collations/charset/korean/tables.go
+++ b/go/mysql/collations/charset/korean/tables.go
@@ -17056,8 +17056,6 @@ var decode = [...]uint16{
17629: 0x8A70,
}
-const numEncodeTables = 7
-
// encodeX are the encoding tables from Unicode to EUC-KR code,
// sorted by decreasing length.
// encode0: 20893 entries for runes in [19968, 40861).
diff --git a/go/mysql/collations/charset/simplifiedchinese/tables.go b/go/mysql/collations/charset/simplifiedchinese/tables.go
index 415f52a1116..645127580f6 100644
--- a/go/mysql/collations/charset/simplifiedchinese/tables.go
+++ b/go/mysql/collations/charset/simplifiedchinese/tables.go
@@ -22091,8 +22091,6 @@ var decode = [...]uint16{
23844: 0x4DAE,
}
-const numEncodeTables = 5
-
// encodeX are the encoding tables from Unicode to GBK code,
// sorted by decreasing length.
// encode0: 28965 entries for runes in [11905, 40870).
diff --git a/go/mysql/collations/coercion.go b/go/mysql/collations/coercion.go
index 8e72ebf3c37..8b66c818cc0 100644
--- a/go/mysql/collations/coercion.go
+++ b/go/mysql/collations/coercion.go
@@ -19,8 +19,6 @@ package collations
import (
"fmt"
"unsafe"
-
- "vitess.io/vitess/go/mysql/collations/charset"
)
func init() {
@@ -95,11 +93,6 @@ const (
RepertoireUnicode
)
-// Coercion is a function that will transform either the given argument
-// arguments of the function into a specific character set. The `dst` argument
-// will be used as the destination of the coerced argument, but it can be nil.
-type Coercion func(dst, in []byte) ([]byte, error)
-
// TypedCollation is the Collation of a SQL expression, including its coercibility
// and repertoire.
type TypedCollation struct {
@@ -112,208 +105,13 @@ func (tc TypedCollation) Valid() bool {
return tc.Collation != Unknown
}
-func checkCompatibleCollations(
- left Collation, leftCoercibility Coercibility, leftRepertoire Repertoire,
- right Collation, rightCoercibility Coercibility, rightRepertoire Repertoire,
-) bool {
- leftCS := left.Charset()
- rightCS := right.Charset()
-
- switch leftCS.(type) {
- case charset.Charset_utf8mb4:
- if leftCoercibility <= rightCoercibility {
- return true
- }
-
- case charset.Charset_utf32:
- switch {
- case leftCoercibility < rightCoercibility:
- return true
- case leftCoercibility == rightCoercibility:
- if !charset.IsUnicode(rightCS) {
- return true
- }
- if !left.IsBinary() {
- return true
- }
- }
-
- case charset.Charset_utf8mb3, charset.Charset_ucs2, charset.Charset_utf16, charset.Charset_utf16le:
- switch {
- case leftCoercibility < rightCoercibility:
- return true
- case leftCoercibility == rightCoercibility:
- if !charset.IsUnicode(rightCS) {
- return true
- }
- }
- }
-
- if rightRepertoire == RepertoireASCII {
- switch {
- case leftCoercibility < rightCoercibility:
- return true
- case leftCoercibility == rightCoercibility:
- if leftRepertoire == RepertoireUnicode {
- return true
- }
- }
- }
-
- return false
-}
-
-// CoercionOptions is used to configure how aggressive the algorithm can be
-// when merging two different collations by transcoding them.
-type CoercionOptions struct {
- // ConvertToSuperset allows merging two different collations as long
- // as the charset of one of them is a strict superset of the other. In
- // order to operate on the two expressions, one of them will need to
- // be transcoded. This transcoding will always be safe because the string
- // with the smallest repertoire will be transcoded to its superset, which
- // cannot fail.
- ConvertToSuperset bool
-
- // ConvertWithCoercion allows merging two different collations by forcing
- // a coercion as long as the coercibility of the two sides is lax enough.
- // This will force a transcoding of one of the expressions even if their
- // respective charsets are not a strict superset, so the resulting transcoding
- // CAN fail depending on the content of their strings.
- ConvertWithCoercion bool
-}
-
-// MergeCollations returns a Coercion function for a pair of TypedCollation based
-// on their coercibility.
-//
-// The function takes the typed collations for the two sides of a text operation
-// (namely, a comparison or concatenation of two textual expressions). These typed
-// collations includes the actual collation for the expression on each size, their
-// coercibility values (see: Coercibility) and their respective repertoires,
-// and returns the target collation (i.e. the collation into which the two expressions
-// must be coerced, and a Coercion function. The Coercion function can be called repeatedly
-// with the different values for the two expressions and will transcode either
-// the left-hand or right-hand value to the appropriate charset so it can be
-// collated against the other value.
-//
-// If the collations for both sides of the expressions are the same, the returned
-// Coercion function will be a no-op. Likewise, if the two collations are not the same,
-// but they are compatible and have the same charset, the Coercion function will also
-// be a no-op.
-//
-// If the collations for both sides of the expression are not compatible, an error
-// will be returned and the returned TypedCollation and Coercion will be nil.
-func (env *Environment) MergeCollations(left, right TypedCollation, opt CoercionOptions) (TypedCollation, Coercion, Coercion, error) {
- leftColl := left.Collation.Get()
- rightColl := right.Collation.Get()
- if leftColl == nil || rightColl == nil {
- return TypedCollation{}, nil, nil, fmt.Errorf("unsupported TypeCollationID: %v / %v", left.Collation, right.Collation)
- }
-
- leftCS := leftColl.Charset()
- rightCS := rightColl.Charset()
-
- if left.Coercibility == CoerceExplicit && right.Coercibility == CoerceExplicit {
- if left.Collation != right.Collation {
- goto cannotCoerce
- }
- }
-
- if leftCS.Name() == rightCS.Name() {
- switch {
- case left.Coercibility < right.Coercibility:
- left.Repertoire |= right.Repertoire
- return left, nil, nil, nil
-
- case left.Coercibility > right.Coercibility:
- right.Repertoire |= left.Repertoire
- return right, nil, nil, nil
-
- case left.Collation == right.Collation:
- left.Repertoire |= right.Repertoire
- return left, nil, nil, nil
- }
-
- if left.Coercibility == CoerceExplicit {
- goto cannotCoerce
- }
-
- leftCsBin := leftColl.IsBinary()
- rightCsBin := rightColl.IsBinary()
-
- switch {
- case leftCsBin && rightCsBin:
- left.Coercibility = CoerceNone
- return left, nil, nil, nil
-
- case leftCsBin:
- return left, nil, nil, nil
-
- case rightCsBin:
- return right, nil, nil, nil
- }
-
- defaults := env.byCharset[leftCS.Name()]
- return TypedCollation{
- Collation: defaults.Binary.ID(),
- Coercibility: CoerceNone,
- Repertoire: left.Repertoire | right.Repertoire,
- }, nil, nil, nil
- }
-
- if _, leftIsBinary := leftColl.(*Collation_binary); leftIsBinary {
- if left.Coercibility <= right.Coercibility {
- return left, nil, nil, nil
- }
- goto coerceToRight
- }
- if _, rightIsBinary := rightColl.(*Collation_binary); rightIsBinary {
- if left.Coercibility >= right.Coercibility {
- return right, nil, nil, nil
- }
- goto coerceToLeft
- }
-
- if opt.ConvertToSuperset {
- if checkCompatibleCollations(leftColl, left.Coercibility, left.Repertoire, rightColl, right.Coercibility, right.Repertoire) {
- goto coerceToLeft
- }
- if checkCompatibleCollations(rightColl, right.Coercibility, right.Repertoire, leftColl, left.Coercibility, left.Repertoire) {
- goto coerceToRight
- }
- }
-
- if opt.ConvertWithCoercion {
- if left.Coercibility < right.Coercibility && right.Coercibility > CoerceImplicit {
- goto coerceToLeft
- }
- if right.Coercibility < left.Coercibility && left.Coercibility > CoerceImplicit {
- goto coerceToRight
- }
- }
-
-cannotCoerce:
- return TypedCollation{}, nil, nil, fmt.Errorf("Illegal mix of collations (%s,%s) and (%s,%s)",
- leftColl.Name(), left.Coercibility, rightColl.Name(), right.Coercibility)
-
-coerceToLeft:
- return left, nil,
- func(dst, in []byte) ([]byte, error) {
- return charset.Convert(dst, leftCS, in, rightCS)
- }, nil
-
-coerceToRight:
- return right,
- func(dst, in []byte) ([]byte, error) {
- return charset.Convert(dst, rightCS, in, leftCS)
- }, nil, nil
-}
-
func (env *Environment) EnsureCollate(fromID, toID ID) error {
// these two lookups should never fail
- from := fromID.Get()
- to := toID.Get()
- if from.Charset().Name() != to.Charset().Name() {
- return fmt.Errorf("COLLATION '%s' is not valid for CHARACTER SET '%s'", to.Name(), from.Charset().Name())
+ fromCharsetName := env.LookupCharsetName(fromID)
+ toCharsetName := env.LookupCharsetName(toID)
+ if fromCharsetName != toCharsetName {
+ toCollName := env.LookupName(toID)
+ return fmt.Errorf("COLLATION '%s' is not valid for CHARACTER SET '%s'", toCollName, fromCharsetName)
}
return nil
}
diff --git a/go/mysql/collations/collation.go b/go/mysql/collations/collation.go
index 172f5d4552f..aebc4dc9646 100644
--- a/go/mysql/collations/collation.go
+++ b/go/mysql/collations/collation.go
@@ -16,167 +16,10 @@ limitations under the License.
package collations
-import (
- "math"
-
- "vitess.io/vitess/go/mysql/collations/charset"
- "vitess.io/vitess/go/vt/vthash"
-)
-
//go:generate go run ./tools/makecolldata/ --embed=true
-// CaseAwareCollation implements lowercase and uppercase conventions for collations.
-type CaseAwareCollation interface {
- Collation
- ToUpper(dst []byte, src []byte) []byte
- ToLower(dst []byte, src []byte) []byte
-}
-
// ID is a numeric identifier for a collation. These identifiers are defined by MySQL, not by Vitess.
type ID uint16
-// Get returns the Collation identified by this ID. If the ID is invalid, this returns nil
-func (i ID) Get() Collation {
- if int(i) < len(collationsById) {
- return collationsById[i]
- }
- return nil
-}
-
-// Valid returns whether this Collation ID is valid (i.e. identifies a valid collation)
-func (i ID) Valid() bool {
- return int(i) < len(collationsById) && collationsById[i] != nil
-}
-
// Unknown is the default ID for an unknown collation.
const Unknown ID = 0
-
-// Collation implements a MySQL-compatible collation. It defines how to compare
-// for sorting order and equality two strings with the same encoding.
-type Collation interface {
- // ID returns the numerical identifier for this collation. This is the same
- // value that is returned by MySQL in a query's headers to identify the collation
- // for a given column
- ID() ID
-
- // Name is the full name of this collation, in the form of "ENCODING_LANG_SENSITIVITY"
- Name() string
-
- // Collate compares two strings using this collation. `left` and `right` must be the
- // two strings encoded in the proper encoding for this collation. If `isPrefix` is true,
- // the function instead behaves equivalently to `strings.HasPrefix(left, right)`, but
- // being collation-aware.
- // It returns a numeric value like a normal comparison function: <0 if left < right,
- // 0 if left == right, >0 if left > right
- Collate(left, right []byte, isPrefix bool) int
-
- // WeightString returns a weight string for the given `src` string. A weight string
- // is a binary representation of the weights for the given string, that can be
- // compared byte-wise to return identical results to collating this string.
- //
- // This means:
- // bytes.Compare(WeightString(left), WeightString(right)) == Collate(left, right)
- //
- // The semantics of this API have been carefully designed to match MySQL's behavior
- // in its `strnxfrm` API. Most notably, the `numCodepoints` argument implies different
- // behaviors depending on the collation's padding mode:
- //
- // - For collations that pad WITH SPACE (this is, all legacy collations in MySQL except
- // for the newly introduced UCA v9.0.0 utf8mb4 collations in MySQL 8.0), `numCodepoints`
- // can have the following values:
- //
- // - if `numCodepoints` is any integer greater than zero, this treats the `src` string
- // as if it were in a `CHAR(numCodepoints)` column in MySQL, meaning that the resulting
- // weight string will be padded with the weight for the SPACE character until it becomes
- // wide enough to fill the `CHAR` column. This is necessary to perform weight comparisons
- // in fixed-`CHAR` columns. If `numCodepoints` is smaller than the actual amount of
- // codepoints stored in `src`, the result is unspecified.
- //
- // - if `numCodepoints` is zero, this is equivalent to `numCodepoints = RuneCount(src)`,
- // meaning that the resulting weight string will have no padding at the end: it'll only have
- // the weight values for the exact amount of codepoints contained in `src`. This is the
- // behavior required to sort `VARCHAR` columns.
- //
- // - if `numCodepoints` is the special constant PadToMax, then the `dst` slice must be
- // pre-allocated to a zero-length slice with enough capacity to hold the complete weight
- // string, and any remaining capacity in `dst` will be filled by the weights for the
- // padding character, repeatedly. This is a special flag used by MySQL when performing
- // filesorts, where all the sorting keys must have identical sizes, even for `VARCHAR`
- // columns.
- //
- // - For collations that have NO PAD (this is, the newly introduced UCA v9.0.0 utf8mb4 collations
- // in MySQL 8.0), `numCodepoints` can only have the special constant `PadToMax`, which will make
- // the weight string padding equivalent to a PAD SPACE collation (as explained in the previous
- // section). All other values for `numCodepoints` are ignored, because NO PAD collations always
- // return the weights for the codepoints in their strings, with no further padding at the end.
- //
- // The resulting weight string is written to `dst`, which can be pre-allocated to
- // WeightStringLen() bytes to prevent growing the slice. `dst` can also be nil, in which
- // case it will grow dynamically. If `numCodepoints` has the special PadToMax value explained
- // earlier, `dst` MUST be pre-allocated to the target size or the function will return an
- // empty slice.
- WeightString(dst, src []byte, numCodepoints int) []byte
-
- // WeightStringLen returns a size (in bytes) that would fit any weight strings for a string
- // with `numCodepoints` using this collation. Note that this is a higher bound for the size
- // of the string, and in practice weight strings can be significantly smaller than the
- // returned value.
- WeightStringLen(numCodepoints int) int
-
- // Hash returns a 32 or 64 bit identifier (depending on the platform) that uniquely identifies
- // the given string based on this collation. It is functionally equivalent to calling WeightString
- // and then hashing the result.
- //
- // Consequently, if the hashes for two strings are different, then the two strings are considered
- // different according to this collation. If the hashes for two strings are equal, the two strings
- // may or may not be considered equal according to this collation, because hashes can collide unlike
- // weight strings.
- //
- // The numCodepoints argument has the same behavior as in WeightString: if this collation uses PAD SPACE,
- // the hash will interpret the source string as if it were stored in a `CHAR(n)` column. If the value of
- // numCodepoints is 0, this is equivalent to setting `numCodepoints = RuneCount(src)`.
- // For collations with NO PAD, the numCodepoint argument is ignored.
- Hash(hasher *vthash.Hasher, src []byte, numCodepoints int)
-
- // Wildcard returns a matcher for the given wildcard pattern. The matcher can be used to repeatedly
- // test different strings to check if they match the pattern. The pattern must be a traditional wildcard
- // pattern, which may contain the provided special characters for matching one character or several characters.
- // The provided `escape` character will be used as an escape sequence in front of the other special characters.
- //
- // This method is fully collation aware; the matching will be performed according to the underlying collation.
- // I.e. if this is a case-insensitive collation, matching will be case-insensitive.
- //
- // The returned WildcardPattern is always valid, but if the provided special characters do not exist in this
- // collation's repertoire, the returned pattern will not match any strings. Likewise, if the provided pattern
- // has invalid syntax, the returned pattern will not match any strings.
- //
- // If the provided special characters are 0, the defaults to parse an SQL 'LIKE' statement will be used.
- // This is, '_' for matching one character, '%' for matching many and '\\' for escape.
- //
- // This method can also be used for Shell-like matching with '?', '*' and '\\' as their respective special
- // characters.
- Wildcard(pat []byte, matchOne, matchMany, escape rune) WildcardPattern
-
- // Charset returns the Charset with which this collation is encoded
- Charset() Charset
-
- // IsBinary returns whether this collation is a binary collation
- IsBinary() bool
-}
-
-// WildcardPattern is a matcher for a wildcard pattern, constructed from a given collation
-type WildcardPattern interface {
- // Match returns whether the given string matches this pattern
- Match(in []byte) bool
-}
-
-type Charset = charset.Charset
-
-const PadToMax = math.MaxInt32
-
-func minInt(i1, i2 int) int {
- if i1 < i2 {
- return i1
- }
- return i2
-}
diff --git a/go/mysql/collations/8bit.go b/go/mysql/collations/colldata/8bit.go
similarity index 92%
rename from go/mysql/collations/8bit.go
rename to go/mysql/collations/colldata/8bit.go
index 7a22ed1d0e1..2355888bbab 100644
--- a/go/mysql/collations/8bit.go
+++ b/go/mysql/collations/colldata/8bit.go
@@ -14,9 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package collations
+package colldata
import (
+ "vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/collations/charset"
"vitess.io/vitess/go/vt/vthash"
)
@@ -42,7 +43,7 @@ type simpletables struct {
}
type Collation_8bit_bin struct {
- id ID
+ id collations.ID
name string
simpletables
charset charset.Charset
@@ -52,7 +53,7 @@ func (c *Collation_8bit_bin) Name() string {
return c.name
}
-func (c *Collation_8bit_bin) ID() ID {
+func (c *Collation_8bit_bin) ID() collations.ID {
return c.id
}
@@ -78,7 +79,7 @@ func (c *Collation_8bit_bin) WeightString(dst, src []byte, numCodepoints int) []
case PadToMax:
padToMax = true
default:
- copyCodepoints = minInt(copyCodepoints, numCodepoints)
+ copyCodepoints = min(copyCodepoints, numCodepoints)
}
dst = append(dst, src[:copyCodepoints]...)
@@ -92,7 +93,7 @@ func (c *Collation_8bit_bin) Hash(hasher *vthash.Hasher, src []byte, numCodepoin
return
}
- tocopy := minInt(len(src), numCodepoints)
+ tocopy := min(len(src), numCodepoints)
hasher.Write(src[:tocopy])
numCodepoints -= tocopy
@@ -129,7 +130,7 @@ func (c *Collation_8bit_bin) ToUpper(dst, src []byte) []byte {
}
type Collation_8bit_simple_ci struct {
- id ID
+ id collations.ID
name string
simpletables
charset charset.Charset
@@ -139,7 +140,7 @@ func (c *Collation_8bit_simple_ci) Name() string {
return c.name
}
-func (c *Collation_8bit_simple_ci) ID() ID {
+func (c *Collation_8bit_simple_ci) ID() collations.ID {
return c.id
}
@@ -153,7 +154,7 @@ func (c *Collation_8bit_simple_ci) IsBinary() bool {
func (c *Collation_8bit_simple_ci) Collate(left, right []byte, rightIsPrefix bool) int {
sortOrder := c.sort
- cmpLen := minInt(len(left), len(right))
+ cmpLen := min(len(left), len(right))
for i := 0; i < cmpLen; i++ {
sortL, sortR := sortOrder[left[i]], sortOrder[right[i]]
@@ -178,7 +179,7 @@ func (c *Collation_8bit_simple_ci) WeightString(dst, src []byte, numCodepoints i
case PadToMax:
padToMax = true
default:
- copyCodepoints = minInt(copyCodepoints, numCodepoints)
+ copyCodepoints = min(copyCodepoints, numCodepoints)
}
for _, ch := range src[:copyCodepoints] {
@@ -192,7 +193,7 @@ func (c *Collation_8bit_simple_ci) Hash(hasher *vthash.Hasher, src []byte, numCo
var tocopy = len(src)
if numCodepoints > 0 {
- tocopy = minInt(tocopy, numCodepoints)
+ tocopy = min(tocopy, numCodepoints)
}
hasher.Write64(uint64(c.id))
@@ -251,8 +252,8 @@ func (c *Collation_8bit_simple_ci) ToUpper(dst, src []byte) []byte {
type Collation_binary struct{}
-func (c *Collation_binary) ID() ID {
- return CollationBinaryID
+func (c *Collation_binary) ID() collations.ID {
+ return collations.CollationBinaryID
}
func (c *Collation_binary) Name() string {
@@ -280,7 +281,7 @@ func (c *Collation_binary) WeightString(dst, src []byte, numCodepoints int) []by
case PadToMax:
padToMax = true
default:
- copyCodepoints = minInt(copyCodepoints, numCodepoints)
+ copyCodepoints = min(copyCodepoints, numCodepoints)
}
dst = append(dst, src[:copyCodepoints]...)
diff --git a/go/mysql/collations/cached_size.go b/go/mysql/collations/colldata/cached_size.go
similarity index 98%
rename from go/mysql/collations/cached_size.go
rename to go/mysql/collations/colldata/cached_size.go
index 6b5e901dffd..36167c69d6d 100644
--- a/go/mysql/collations/cached_size.go
+++ b/go/mysql/collations/colldata/cached_size.go
@@ -15,7 +15,7 @@ limitations under the License.
*/
// Code generated by Sizegen. DO NOT EDIT.
-package collations
+package colldata
import hack "vitess.io/vitess/go/hack"
diff --git a/go/mysql/collations/colldata/collation.go b/go/mysql/collations/colldata/collation.go
new file mode 100644
index 00000000000..ec66fc09b58
--- /dev/null
+++ b/go/mysql/collations/colldata/collation.go
@@ -0,0 +1,374 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package colldata
+
+import (
+ "fmt"
+ "math"
+
+ "vitess.io/vitess/go/mysql/collations"
+ "vitess.io/vitess/go/mysql/collations/charset"
+ "vitess.io/vitess/go/vt/vthash"
+)
+
+type Charset = charset.Charset
+
+// Collation implements a MySQL-compatible collation. It defines how to compare
+// for sorting order and equality two strings with the same encoding.
+type Collation interface {
+ // ID returns the numerical identifier for this collation. This is the same
+ // value that is returned by MySQL in a query's headers to identify the collation
+ // for a given column
+ ID() collations.ID
+
+ // Name is the full name of this collation, in the form of "ENCODING_LANG_SENSITIVITY"
+ Name() string
+
+ // Collate compares two strings using this collation. `left` and `right` must be the
+ // two strings encoded in the proper encoding for this collation. If `isPrefix` is true,
+ // the function instead behaves equivalently to `strings.HasPrefix(left, right)`, but
+ // being collation-aware.
+ // It returns a numeric value like a normal comparison function: <0 if left < right,
+ // 0 if left == right, >0 if left > right
+ Collate(left, right []byte, isPrefix bool) int
+
+ // WeightString returns a weight string for the given `src` string. A weight string
+ // is a binary representation of the weights for the given string, that can be
+ // compared byte-wise to return identical results to collating this string.
+ //
+ // This means:
+ // bytes.Compare(WeightString(left), WeightString(right)) == Collate(left, right)
+ //
+ // The semantics of this API have been carefully designed to match MySQL's behavior
+ // in its `strnxfrm` API. Most notably, the `numCodepoints` argument implies different
+ // behaviors depending on the collation's padding mode:
+ //
+ // - For collations that pad WITH SPACE (this is, all legacy collations in MySQL except
+ // for the newly introduced UCA v9.0.0 utf8mb4 collations in MySQL 8.0), `numCodepoints`
+ // can have the following values:
+ //
+ // - if `numCodepoints` is any integer greater than zero, this treats the `src` string
+ // as if it were in a `CHAR(numCodepoints)` column in MySQL, meaning that the resulting
+ // weight string will be padded with the weight for the SPACE character until it becomes
+ // wide enough to fill the `CHAR` column. This is necessary to perform weight comparisons
+ // in fixed-`CHAR` columns. If `numCodepoints` is smaller than the actual amount of
+ // codepoints stored in `src`, the result is unspecified.
+ //
+ // - if `numCodepoints` is zero, this is equivalent to `numCodepoints = RuneCount(src)`,
+ // meaning that the resulting weight string will have no padding at the end: it'll only have
+ // the weight values for the exact amount of codepoints contained in `src`. This is the
+ // behavior required to sort `VARCHAR` columns.
+ //
+ // - if `numCodepoints` is the special constant PadToMax, then the `dst` slice must be
+ // pre-allocated to a zero-length slice with enough capacity to hold the complete weight
+ // string, and any remaining capacity in `dst` will be filled by the weights for the
+ // padding character, repeatedly. This is a special flag used by MySQL when performing
+ // filesorts, where all the sorting keys must have identical sizes, even for `VARCHAR`
+ // columns.
+ //
+ // - For collations that have NO PAD (this is, the newly introduced UCA v9.0.0 utf8mb4 collations
+ // in MySQL 8.0), `numCodepoints` can only have the special constant `PadToMax`, which will make
+ // the weight string padding equivalent to a PAD SPACE collation (as explained in the previous
+ // section). All other values for `numCodepoints` are ignored, because NO PAD collations always
+ // return the weights for the codepoints in their strings, with no further padding at the end.
+ //
+ // The resulting weight string is written to `dst`, which can be pre-allocated to
+ // WeightStringLen() bytes to prevent growing the slice. `dst` can also be nil, in which
+ // case it will grow dynamically. If `numCodepoints` has the special PadToMax value explained
+ // earlier, `dst` MUST be pre-allocated to the target size or the function will return an
+ // empty slice.
+ WeightString(dst, src []byte, numCodepoints int) []byte
+
+ // WeightStringLen returns a size (in bytes) that would fit any weight strings for a string
+ // with `numCodepoints` using this collation. Note that this is a higher bound for the size
+ // of the string, and in practice weight strings can be significantly smaller than the
+ // returned value.
+ WeightStringLen(numCodepoints int) int
+
+ // Hash returns a 32 or 64 bit identifier (depending on the platform) that uniquely identifies
+ // the given string based on this collation. It is functionally equivalent to calling WeightString
+ // and then hashing the result.
+ //
+ // Consequently, if the hashes for two strings are different, then the two strings are considered
+ // different according to this collation. If the hashes for two strings are equal, the two strings
+ // may or may not be considered equal according to this collation, because hashes can collide unlike
+ // weight strings.
+ //
+ // The numCodepoints argument has the same behavior as in WeightString: if this collation uses PAD SPACE,
+ // the hash will interpret the source string as if it were stored in a `CHAR(n)` column. If the value of
+ // numCodepoints is 0, this is equivalent to setting `numCodepoints = RuneCount(src)`.
+ // For collations with NO PAD, the numCodepoint argument is ignored.
+ Hash(hasher *vthash.Hasher, src []byte, numCodepoints int)
+
+ // Wildcard returns a matcher for the given wildcard pattern. The matcher can be used to repeatedly
+ // test different strings to check if they match the pattern. The pattern must be a traditional wildcard
+ // pattern, which may contain the provided special characters for matching one character or several characters.
+ // The provided `escape` character will be used as an escape sequence in front of the other special characters.
+ //
+ // This method is fully collation aware; the matching will be performed according to the underlying collation.
+ // I.e. if this is a case-insensitive collation, matching will be case-insensitive.
+ //
+ // The returned WildcardPattern is always valid, but if the provided special characters do not exist in this
+ // collation's repertoire, the returned pattern will not match any strings. Likewise, if the provided pattern
+ // has invalid syntax, the returned pattern will not match any strings.
+ //
+ // If the provided special characters are 0, the defaults to parse an SQL 'LIKE' statement will be used.
+ // This is, '_' for matching one character, '%' for matching many and '\\' for escape.
+ //
+ // This method can also be used for Shell-like matching with '?', '*' and '\\' as their respective special
+ // characters.
+ Wildcard(pat []byte, matchOne, matchMany, escape rune) WildcardPattern
+
+ // Charset returns the Charset with which this collation is encoded
+ Charset() Charset
+
+ // IsBinary returns whether this collation is a binary collation
+ IsBinary() bool
+}
+
+// WildcardPattern is a matcher for a wildcard pattern, constructed from a given collation
+type WildcardPattern interface {
+ // Match returns whether the given string matches this pattern
+ Match(in []byte) bool
+}
+
+const PadToMax = math.MaxInt32
+
+// CaseAwareCollation implements lowercase and uppercase conventions for collations.
+type CaseAwareCollation interface {
+ Collation
+ ToUpper(dst []byte, src []byte) []byte
+ ToLower(dst []byte, src []byte) []byte
+}
+
+func Lookup(id collations.ID) Collation {
+ if int(id) >= len(collationsById) {
+ return nil
+ }
+ return collationsById[id]
+}
+
+// All returns a slice with all known collations in Vitess.
+func All(env *collations.Environment) []Collation {
+ allCols := env.AllCollationIDs()
+ all := make([]Collation, 0, len(allCols))
+ for _, col := range allCols {
+ all = append(all, collationsById[col])
+ }
+ return all
+}
+
+func checkCompatibleCollations(
+ left Collation, leftCoercibility collations.Coercibility, leftRepertoire collations.Repertoire,
+ right Collation, rightCoercibility collations.Coercibility, rightRepertoire collations.Repertoire,
+) bool {
+ leftCS := left.Charset()
+ rightCS := right.Charset()
+
+ switch leftCS.(type) {
+ case charset.Charset_utf8mb4:
+ if leftCoercibility <= rightCoercibility {
+ return true
+ }
+
+ case charset.Charset_utf32:
+ switch {
+ case leftCoercibility < rightCoercibility:
+ return true
+ case leftCoercibility == rightCoercibility:
+ if !charset.IsUnicode(rightCS) {
+ return true
+ }
+ if !left.IsBinary() {
+ return true
+ }
+ }
+
+ case charset.Charset_utf8mb3, charset.Charset_ucs2, charset.Charset_utf16, charset.Charset_utf16le:
+ switch {
+ case leftCoercibility < rightCoercibility:
+ return true
+ case leftCoercibility == rightCoercibility:
+ if !charset.IsUnicode(rightCS) {
+ return true
+ }
+ }
+ }
+
+ if rightRepertoire == collations.RepertoireASCII {
+ switch {
+ case leftCoercibility < rightCoercibility:
+ return true
+ case leftCoercibility == rightCoercibility:
+ if leftRepertoire == collations.RepertoireUnicode {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// CoercionOptions is used to configure how aggressive the algorithm can be
+// when merging two different collations by transcoding them.
+type CoercionOptions struct {
+ // ConvertToSuperset allows merging two different collations as long
+ // as the charset of one of them is a strict superset of the other. In
+ // order to operate on the two expressions, one of them will need to
+ // be transcoded. This transcoding will always be safe because the string
+ // with the smallest repertoire will be transcoded to its superset, which
+ // cannot fail.
+ ConvertToSuperset bool
+
+ // ConvertWithCoercion allows merging two different collations by forcing
+ // a coercion as long as the coercibility of the two sides is lax enough.
+ // This will force a transcoding of one of the expressions even if their
+ // respective charsets are not a strict superset, so the resulting transcoding
+ // CAN fail depending on the content of their strings.
+ ConvertWithCoercion bool
+}
+
+// Coercion is a function that will transform either the given argument
+// arguments of the function into a specific character set. The `dst` argument
+// will be used as the destination of the coerced argument, but it can be nil.
+type Coercion func(dst, in []byte) ([]byte, error)
+
+// Merge returns a Coercion function for a pair of TypedCollation based
+// on their coercibility.
+//
+// The function takes the typed collations for the two sides of a text operation
+// (namely, a comparison or concatenation of two textual expressions). These typed
+// collations includes the actual collation for the expression on each size, their
+// coercibility values (see: Coercibility) and their respective repertoires,
+// and returns the target collation (i.e. the collation into which the two expressions
+// must be coerced, and a Coercion function. The Coercion function can be called repeatedly
+// with the different values for the two expressions and will transcode either
+// the left-hand or right-hand value to the appropriate charset so it can be
+// collated against the other value.
+//
+// If the collations for both sides of the expressions are the same, the returned
+// Coercion function will be a no-op. Likewise, if the two collations are not the same,
+// but they are compatible and have the same charset, the Coercion function will also
+// be a no-op.
+//
+// If the collations for both sides of the expression are not compatible, an error
+// will be returned and the returned TypedCollation and Coercion will be nil.
+func Merge(env *collations.Environment, left, right collations.TypedCollation, opt CoercionOptions) (collations.TypedCollation, Coercion, Coercion, error) {
+ leftColl := Lookup(left.Collation)
+ rightColl := Lookup(right.Collation)
+ if leftColl == nil || rightColl == nil {
+ return collations.TypedCollation{}, nil, nil, fmt.Errorf("unsupported TypeCollationID: %v / %v", left.Collation, right.Collation)
+ }
+
+ leftCS := leftColl.Charset()
+ rightCS := rightColl.Charset()
+
+ if left.Coercibility == collations.CoerceExplicit && right.Coercibility == collations.CoerceExplicit {
+ if left.Collation != right.Collation {
+ goto cannotCoerce
+ }
+ }
+
+ if leftCS.Name() == rightCS.Name() {
+ switch {
+ case left.Coercibility < right.Coercibility:
+ left.Repertoire |= right.Repertoire
+ return left, nil, nil, nil
+
+ case left.Coercibility > right.Coercibility:
+ right.Repertoire |= left.Repertoire
+ return right, nil, nil, nil
+
+ case left.Collation == right.Collation:
+ left.Repertoire |= right.Repertoire
+ return left, nil, nil, nil
+ }
+
+ if left.Coercibility == collations.CoerceExplicit {
+ goto cannotCoerce
+ }
+
+ leftCsBin := leftColl.IsBinary()
+ rightCsBin := rightColl.IsBinary()
+
+ switch {
+ case leftCsBin && rightCsBin:
+ left.Coercibility = collations.CoerceNone
+ return left, nil, nil, nil
+
+ case leftCsBin:
+ return left, nil, nil, nil
+
+ case rightCsBin:
+ return right, nil, nil, nil
+ }
+
+ defaults := env.LookupByCharset(leftCS.Name())
+ return collations.TypedCollation{
+ Collation: defaults.Binary,
+ Coercibility: collations.CoerceNone,
+ Repertoire: left.Repertoire | right.Repertoire,
+ }, nil, nil, nil
+ }
+
+ if _, leftIsBinary := leftColl.(*Collation_binary); leftIsBinary {
+ if left.Coercibility <= right.Coercibility {
+ return left, nil, nil, nil
+ }
+ goto coerceToRight
+ }
+ if _, rightIsBinary := rightColl.(*Collation_binary); rightIsBinary {
+ if left.Coercibility >= right.Coercibility {
+ return right, nil, nil, nil
+ }
+ goto coerceToLeft
+ }
+
+ if opt.ConvertToSuperset {
+ if checkCompatibleCollations(leftColl, left.Coercibility, left.Repertoire, rightColl, right.Coercibility, right.Repertoire) {
+ goto coerceToLeft
+ }
+ if checkCompatibleCollations(rightColl, right.Coercibility, right.Repertoire, leftColl, left.Coercibility, left.Repertoire) {
+ goto coerceToRight
+ }
+ }
+
+ if opt.ConvertWithCoercion {
+ if left.Coercibility < right.Coercibility && right.Coercibility > collations.CoerceImplicit {
+ goto coerceToLeft
+ }
+ if right.Coercibility < left.Coercibility && left.Coercibility > collations.CoerceImplicit {
+ goto coerceToRight
+ }
+ }
+
+cannotCoerce:
+ return collations.TypedCollation{}, nil, nil, fmt.Errorf("Illegal mix of collations (%s,%s) and (%s,%s)",
+ leftColl.Name(), left.Coercibility, rightColl.Name(), right.Coercibility)
+
+coerceToLeft:
+ return left, nil,
+ func(dst, in []byte) ([]byte, error) {
+ return charset.Convert(dst, leftCS, in, rightCS)
+ }, nil
+
+coerceToRight:
+ return right,
+ func(dst, in []byte) ([]byte, error) {
+ return charset.Convert(dst, rightCS, in, leftCS)
+ }, nil, nil
+}
diff --git a/go/mysql/collations/fuzz.go b/go/mysql/collations/colldata/fuzz.go
similarity index 98%
rename from go/mysql/collations/fuzz.go
rename to go/mysql/collations/colldata/fuzz.go
index e71eae3fbdc..c5ebf50698b 100644
--- a/go/mysql/collations/fuzz.go
+++ b/go/mysql/collations/colldata/fuzz.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package collations
+package colldata
import (
fuzz "github.com/AdaLogics/go-fuzz-headers"
diff --git a/go/mysql/collations/fuzz_test.go b/go/mysql/collations/colldata/fuzz_test.go
similarity index 96%
rename from go/mysql/collations/fuzz_test.go
rename to go/mysql/collations/colldata/fuzz_test.go
index 1f36fd34ff3..0c11116f580 100644
--- a/go/mysql/collations/fuzz_test.go
+++ b/go/mysql/collations/colldata/fuzz_test.go
@@ -18,9 +18,11 @@ limitations under the License.
// The fuzzing tests for collations use the new Fuzz implementation in Go 1.18+
-package collations
+package colldata
-import "testing"
+import (
+ "testing"
+)
func FuzzUCACollate(f *testing.F) {
for _, left := range AllTestStrings {
diff --git a/go/mysql/collations/colldata/golden_test.go b/go/mysql/collations/colldata/golden_test.go
new file mode 100644
index 00000000000..2b41ebcddc6
--- /dev/null
+++ b/go/mysql/collations/colldata/golden_test.go
@@ -0,0 +1,82 @@
+/*
+Copyright 2021 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package colldata
+
+import (
+ "bytes"
+ "fmt"
+ "path/filepath"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+
+ "vitess.io/vitess/go/mysql/collations/charset"
+ "vitess.io/vitess/go/mysql/collations/internal/testutil"
+)
+
+func TestGoldenWeights(t *testing.T) {
+ gllGoldenTests, err := filepath.Glob("testdata/wiki_*.gob.gz")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, goldenPath := range gllGoldenTests {
+ golden := &testutil.GoldenTest{}
+ if err := golden.DecodeFromFile(goldenPath); err != nil {
+ t.Fatal(err)
+ }
+
+ for _, goldenCase := range golden.Cases {
+ t.Run(fmt.Sprintf("%s (%s)", golden.Name, goldenCase.Lang), func(t *testing.T) {
+ for coll, expected := range goldenCase.Weights {
+ coll := testcollation(t, coll)
+
+ input, err := charset.ConvertFromUTF8(nil, coll.Charset(), goldenCase.Text)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ result := coll.WeightString(nil, input, 0)
+ assert.True(t, bytes.Equal(expected, result), "mismatch for collation=%s\noriginal: %s\ninput: %#v\nexpected: %v\nactual: %v", coll.Name(), string(goldenCase.Text), input, expected, result)
+
+ }
+ })
+ }
+ }
+}
+
+func TestCollationsForLanguage(t *testing.T) {
+ allCollations := testall()
+ langCounts := make(map[testutil.Lang][]string)
+
+ for lang := range testutil.KnownLanguages {
+ var matched []string
+ for _, coll := range allCollations {
+ name := coll.Name()
+ if lang.MatchesCollation(name) {
+ matched = append(matched, name)
+ }
+ }
+ langCounts[lang] = matched
+ }
+
+ for lang := range testutil.KnownLanguages {
+ assert.NotEqual(t, 0, len(langCounts[lang]), "no collations found for %q", lang)
+
+ t.Logf("%s: %v", lang, langCounts[lang])
+ }
+}
diff --git a/go/mysql/collations/multibyte.go b/go/mysql/collations/colldata/multibyte.go
similarity index 95%
rename from go/mysql/collations/multibyte.go
rename to go/mysql/collations/colldata/multibyte.go
index f9d13df2d1f..cc123a25a1a 100644
--- a/go/mysql/collations/multibyte.go
+++ b/go/mysql/collations/colldata/multibyte.go
@@ -14,23 +14,24 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package collations
+package colldata
import (
"math"
+ "vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/collations/charset"
"vitess.io/vitess/go/vt/vthash"
)
type Collation_multibyte struct {
- id ID
+ id collations.ID
name string
sort *[256]byte
charset charset.Charset
}
-func (c *Collation_multibyte) ID() ID {
+func (c *Collation_multibyte) ID() collations.ID {
return c.id
}
@@ -51,7 +52,7 @@ func (c *Collation_multibyte) Collate(left, right []byte, isPrefix bool) int {
return collationBinary(left, right, isPrefix)
}
- cmpLen := minInt(len(left), len(right))
+ cmpLen := min(len(left), len(right))
cs := c.charset
sortOrder := c.sort
for i := 0; i < cmpLen; i++ {
@@ -62,7 +63,7 @@ func (c *Collation_multibyte) Collate(left, right []byte, isPrefix bool) int {
}
_, widthL := cs.DecodeRune(left[i:])
_, widthR := cs.DecodeRune(right[i:])
- switch minInt(widthL, widthR) {
+ switch min(widthL, widthR) {
case 4:
i++
if left[i] != right[i] {
diff --git a/go/mysql/collations/mysqldata.go b/go/mysql/collations/colldata/mysqldata.go
similarity index 99%
rename from go/mysql/collations/mysqldata.go
rename to go/mysql/collations/colldata/mysqldata.go
index 0b3d10372d0..f626028cb95 100644
--- a/go/mysql/collations/mysqldata.go
+++ b/go/mysql/collations/colldata/mysqldata.go
@@ -1,6 +1,22 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
// Code generated by makecolldata DO NOT EDIT
-package collations
+package colldata
import (
charset "vitess.io/vitess/go/mysql/collations/charset"
diff --git a/go/mysql/collations/mysqlucadata.bin b/go/mysql/collations/colldata/mysqlucadata.bin
similarity index 100%
rename from go/mysql/collations/mysqlucadata.bin
rename to go/mysql/collations/colldata/mysqlucadata.bin
diff --git a/go/mysql/collations/mysqlucadata.go b/go/mysql/collations/colldata/mysqlucadata.go
similarity index 99%
rename from go/mysql/collations/mysqlucadata.go
rename to go/mysql/collations/colldata/mysqlucadata.go
index ae8e2d48642..0affc45d11f 100644
--- a/go/mysql/collations/mysqlucadata.go
+++ b/go/mysql/collations/colldata/mysqlucadata.go
@@ -1,10 +1,25 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
// Code generated by makecolldata DO NOT EDIT
-package collations
+package colldata
import (
_ "embed"
- reflect "reflect"
unsafe "unsafe"
)
@@ -1402,5 +1417,5 @@ var weightTable_uca520 = []*[]uint16{
var weightsUCA_embed_data string
func weightsUCA_embed(pos, length int) []uint16 {
- return (*[0x7fff0000]uint16)(unsafe.Pointer((*reflect.StringHeader)(unsafe.Pointer(&weightsUCA_embed_data)).Data))[pos : pos+length]
+ return (*[0x7fff0000]uint16)(unsafe.Pointer(unsafe.StringData(weightsUCA_embed_data)))[pos : pos+length]
}
diff --git a/go/mysql/collations/uca.go b/go/mysql/collations/colldata/uca.go
similarity index 96%
rename from go/mysql/collations/uca.go
rename to go/mysql/collations/colldata/uca.go
index 444fd3c295c..4b7272bfbc3 100644
--- a/go/mysql/collations/uca.go
+++ b/go/mysql/collations/colldata/uca.go
@@ -14,12 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package collations
+package colldata
import (
"bytes"
"math/bits"
+ "vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/collations/charset"
"vitess.io/vitess/go/mysql/collations/internal/uca"
"vitess.io/vitess/go/vt/vthash"
@@ -27,7 +28,7 @@ import (
type Collation_utf8mb4_uca_0900 struct {
name string
- id ID
+ id collations.ID
uca *uca.Collation900
}
@@ -35,7 +36,7 @@ func (c *Collation_utf8mb4_uca_0900) Name() string {
return c.name
}
-func (c *Collation_utf8mb4_uca_0900) ID() ID {
+func (c *Collation_utf8mb4_uca_0900) ID() collations.ID {
return c.id
}
@@ -213,7 +214,7 @@ func (c *Collation_utf8mb4_uca_0900) ToUpper(dst, src []byte) []byte {
type Collation_utf8mb4_0900_bin struct{}
-func (c *Collation_utf8mb4_0900_bin) ID() ID {
+func (c *Collation_utf8mb4_0900_bin) ID() collations.ID {
return 309
}
@@ -271,11 +272,11 @@ func (c *Collation_utf8mb4_0900_bin) ToUpper(dst, src []byte) []byte {
type Collation_uca_legacy struct {
name string
- id ID
+ id collations.ID
uca *uca.CollationLegacy
}
-func (c *Collation_uca_legacy) ID() ID {
+func (c *Collation_uca_legacy) ID() collations.ID {
return c.id
}
diff --git a/go/mysql/collations/uca_contraction_test.go b/go/mysql/collations/colldata/uca_contraction_test.go
similarity index 99%
rename from go/mysql/collations/uca_contraction_test.go
rename to go/mysql/collations/colldata/uca_contraction_test.go
index 7d59b6fa4a8..d17ff21e255 100644
--- a/go/mysql/collations/uca_contraction_test.go
+++ b/go/mysql/collations/colldata/uca_contraction_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package collations
+package colldata
import (
"encoding/json"
diff --git a/go/mysql/collations/uca_tables_test.go b/go/mysql/collations/colldata/uca_tables_test.go
similarity index 95%
rename from go/mysql/collations/uca_tables_test.go
rename to go/mysql/collations/colldata/uca_tables_test.go
index 011095e1cf6..40c2f3bbed3 100644
--- a/go/mysql/collations/uca_tables_test.go
+++ b/go/mysql/collations/colldata/uca_tables_test.go
@@ -14,13 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package collations
+package colldata
import (
"encoding/json"
"fmt"
"os"
- "reflect"
"strconv"
"testing"
"unsafe"
@@ -95,12 +94,12 @@ func TestWeightsForAllCodepoints(t *testing.T) {
}
func TestWeightTablesAreDeduplicated(t *testing.T) {
- sliceptr := func(table uca.Weights) uintptr {
- hdr := (*reflect.SliceHeader)(unsafe.Pointer(&table))
- return hdr.Data
+ sliceptr := func(table uca.Weights) unsafe.Pointer {
+ data := unsafe.SliceData(table)
+ return unsafe.Pointer(data)
}
- uniqueTables := make(map[uintptr]int)
+ uniqueTables := make(map[unsafe.Pointer]int)
for _, col := range testall() {
var weights uca.Weights
switch col := col.(type) {
diff --git a/go/mysql/collations/uca_test.go b/go/mysql/collations/colldata/uca_test.go
similarity index 99%
rename from go/mysql/collations/uca_test.go
rename to go/mysql/collations/colldata/uca_test.go
index 5e3f22929c8..70c9312636e 100644
--- a/go/mysql/collations/uca_test.go
+++ b/go/mysql/collations/colldata/uca_test.go
@@ -14,12 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package collations
+package colldata
import (
"bytes"
"fmt"
"math/rand"
+ "slices"
"sort"
"strings"
"sync"
@@ -28,7 +29,6 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
- "golang.org/x/exp/slices"
"vitess.io/vitess/go/mysql/collations/charset"
"vitess.io/vitess/go/vt/vthash"
@@ -949,8 +949,8 @@ func TestUCACollationOrder(t *testing.T) {
j := rand.Intn(i + 1)
ary[i], ary[j] = ary[j], ary[i]
}
- slices.SortFunc(ary, func(a, b string) bool {
- return col.Collate([]byte(a), []byte(b), false) < 0
+ slices.SortFunc(ary, func(a, b string) int {
+ return col.Collate([]byte(a), []byte(b), false)
})
require.Equal(t, sorted, ary)
}
diff --git a/go/mysql/collations/unicase.go b/go/mysql/collations/colldata/unicase.go
similarity index 99%
rename from go/mysql/collations/unicase.go
rename to go/mysql/collations/colldata/unicase.go
index c669c2368ad..964d48d7107 100644
--- a/go/mysql/collations/unicase.go
+++ b/go/mysql/collations/colldata/unicase.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package collations
+package colldata
import (
"vitess.io/vitess/go/mysql/collations/charset"
diff --git a/go/mysql/collations/unicode.go b/go/mysql/collations/colldata/unicode.go
similarity index 96%
rename from go/mysql/collations/unicode.go
rename to go/mysql/collations/colldata/unicode.go
index 8168595cd34..c0495b0474f 100644
--- a/go/mysql/collations/unicode.go
+++ b/go/mysql/collations/colldata/unicode.go
@@ -14,25 +14,26 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package collations
+package colldata
import (
"bytes"
"math"
"math/bits"
+ "vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/collations/charset"
"vitess.io/vitess/go/vt/vthash"
)
type Collation_unicode_general_ci struct {
- id ID
+ id collations.ID
name string
unicase *UnicaseInfo
charset charset.Charset
}
-func (c *Collation_unicode_general_ci) ID() ID {
+func (c *Collation_unicode_general_ci) ID() collations.ID {
return c.id
}
@@ -164,12 +165,12 @@ func (c *Collation_unicode_general_ci) Wildcard(pat []byte, matchOne rune, match
}
type Collation_unicode_bin struct {
- id ID
+ id collations.ID
name string
charset charset.Charset
}
-func (c *Collation_unicode_bin) ID() ID {
+func (c *Collation_unicode_bin) ID() collations.ID {
return c.id
}
@@ -352,7 +353,7 @@ func (c *Collation_unicode_bin) Wildcard(pat []byte, matchOne rune, matchMany ru
}
func collationBinary(left, right []byte, rightPrefix bool) int {
- minLen := minInt(len(left), len(right))
+ minLen := min(len(left), len(right))
if diff := bytes.Compare(left[:minLen], right[:minLen]); diff != 0 {
return diff
}
diff --git a/go/mysql/collations/wildcard.go b/go/mysql/collations/colldata/wildcard.go
similarity index 99%
rename from go/mysql/collations/wildcard.go
rename to go/mysql/collations/colldata/wildcard.go
index 5d8fd012375..01f4807b7df 100644
--- a/go/mysql/collations/wildcard.go
+++ b/go/mysql/collations/colldata/wildcard.go
@@ -38,7 +38,7 @@ limitations under the License.
//
// Because of this, we intend to enable the recursive algorithm by default.
-package collations
+package colldata
import (
"unicode/utf8"
diff --git a/go/mysql/collations/wildcard_test.go b/go/mysql/collations/colldata/wildcard_test.go
similarity index 99%
rename from go/mysql/collations/wildcard_test.go
rename to go/mysql/collations/colldata/wildcard_test.go
index dc6a44c644c..fff08f35c22 100644
--- a/go/mysql/collations/wildcard_test.go
+++ b/go/mysql/collations/colldata/wildcard_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package collations
+package colldata
import (
"testing"
diff --git a/go/mysql/collations/env.go b/go/mysql/collations/env.go
index d5b113fb204..91fc2a8bd8c 100644
--- a/go/mysql/collations/env.go
+++ b/go/mysql/collations/env.go
@@ -18,26 +18,29 @@ package collations
import (
"fmt"
+ "slices"
"strings"
"sync"
)
type colldefaults struct {
- Default Collation
- Binary Collation
+ Default ID
+ Binary ID
}
// Environment is a collation environment for a MySQL version, which contains
// a database of collations and defaults for that specific version.
type Environment struct {
- version collver
- byName map[string]Collation
- byCharset map[string]*colldefaults
- unsupported map[string]ID
+ version collver
+ byName map[string]ID
+ byCharset map[string]*colldefaults
+ byCharsetName map[ID]string
+ unsupported map[string]ID
+ byID map[ID]string
}
// LookupByName returns the collation with the given name.
-func (env *Environment) LookupByName(name string) Collation {
+func (env *Environment) LookupByName(name string) ID {
return env.byName[name]
}
@@ -45,37 +48,34 @@ func (env *Environment) LookupByName(name string) Collation {
// the collation is supported by this package.
func (env *Environment) LookupID(name string) (ID, bool) {
if supported, ok := env.byName[name]; ok {
- return supported.ID(), true
+ return supported, true
}
- if unsupported, ok := env.unsupported[name]; ok {
- return unsupported, false
+ if unsup, ok := env.unsupported[name]; ok {
+ return unsup, false
}
return Unknown, false
}
+// LookupName returns the collation name for the given ID and whether
+// the collation is supported by this package.
+func (env *Environment) LookupName(id ID) string {
+ return env.byID[id]
+}
+
// DefaultCollationForCharset returns the default collation for a charset
-func (env *Environment) DefaultCollationForCharset(charset string) Collation {
+func (env *Environment) DefaultCollationForCharset(charset string) ID {
if defaults, ok := env.byCharset[charset]; ok {
return defaults.Default
}
- return nil
+ return Unknown
}
// BinaryCollationForCharset returns the default binary collation for a charset
-func (env *Environment) BinaryCollationForCharset(charset string) Collation {
+func (env *Environment) BinaryCollationForCharset(charset string) ID {
if defaults, ok := env.byCharset[charset]; ok {
return defaults.Binary
}
- return nil
-}
-
-// AllCollations returns a slice with all known collations in Vitess.
-func (env *Environment) AllCollations() (all []Collation) {
- all = make([]Collation, 0, len(env.byName))
- for _, col := range env.byName {
- all = append(all, col)
- }
- return
+ return Unknown
}
var globalEnvironments = make(map[collver]*Environment)
@@ -109,7 +109,7 @@ func NewEnvironment(serverVersion string) *Environment {
case strings.HasSuffix(serverVersion, "-ripple"):
// the ripple binlog server can mask the actual version of mysqld;
// assume we have the highest
- version = collverMySQL80
+ version = collverMySQL8
case strings.Contains(serverVersion, "mariadb"):
switch {
case strings.Contains(serverVersion, "10.0."):
@@ -125,66 +125,62 @@ func NewEnvironment(serverVersion string) *Environment {
version = collverMySQL56
case strings.HasPrefix(serverVersion, "5.7."):
version = collverMySQL57
- case strings.HasPrefix(serverVersion, "8.0."):
- version = collverMySQL80
+ case strings.HasPrefix(serverVersion, "8."):
+ version = collverMySQL8
}
return fetchCacheEnvironment(version)
}
func makeEnv(version collver) *Environment {
env := &Environment{
- version: version,
- byName: make(map[string]Collation),
- byCharset: make(map[string]*colldefaults),
- unsupported: make(map[string]ID),
+ version: version,
+ byName: make(map[string]ID),
+ byCharset: make(map[string]*colldefaults),
+ byCharsetName: make(map[ID]string),
+ byID: make(map[ID]string),
+ unsupported: make(map[string]ID),
}
for collid, vi := range globalVersionInfo {
var ournames []string
+ var ourcharsets []string
for _, alias := range vi.alias {
if alias.mask&version != 0 {
ournames = append(ournames, alias.name)
+ ourcharsets = append(ourcharsets, alias.charset)
}
}
if len(ournames) == 0 {
continue
}
- var collation Collation
- if int(collid) < len(collationsById) {
- collation = collationsById[collid]
- }
- if collation == nil {
+ if int(collid) >= len(supported) || supported[collid] == "" {
for _, name := range ournames {
env.unsupported[name] = collid
}
continue
}
- for _, name := range ournames {
- env.byName[name] = collation
- }
-
- csname := collation.Charset().Name()
- if _, ok := env.byCharset[csname]; !ok {
- env.byCharset[csname] = &colldefaults{}
- }
- defaults := env.byCharset[csname]
- if vi.isdefault&version != 0 {
- defaults.Default = collation
- }
- if collation.IsBinary() {
- if defaults.Binary != nil && defaults.Binary.ID() > collation.ID() {
- // If there's more than one binary collation, the one with the
- // highest ID (i.e. the newest one) takes precedence. This applies
- // to utf8mb4_bin vs utf8mb4_0900_bin
- continue
+ for i, name := range ournames {
+ cs := ourcharsets[i]
+ env.byName[name] = collid
+ env.byID[collid] = name
+ env.byCharsetName[collid] = cs
+ defaults := env.byCharset[cs]
+ if defaults == nil {
+ defaults = &colldefaults{}
+ env.byCharset[cs] = defaults
+ }
+ if vi.isdefault&version != 0 {
+ defaults.Default = collid
+ }
+ if strings.HasSuffix(name, "_bin") && defaults.Binary < collid {
+ defaults.Binary = collid
}
- defaults.Binary = collation
}
}
- for from, to := range version.charsetAliases() {
+ for from, to := range charsetAliases() {
env.byCharset[from] = env.byCharset[to]
}
@@ -194,15 +190,13 @@ func makeEnv(version collver) *Environment {
// A few interesting character set values.
// See http://dev.mysql.com/doc/internals/en/character-set.html#packet-Protocol::CharacterSet
const (
- CollationUtf8mb3ID = 33
- CollationUtf8mb4ID = 255
- CollationBinaryID = 63
- CollationUtf8mb4BinID = 46
+ CollationUtf8mb3ID = 33
+ CollationUtf8mb4ID = 255
+ CollationBinaryID = 63
+ CollationUtf8mb4BinID = 46
+ CollationLatin1Swedish = 8
)
-// Binary is the default Binary collation
-var Binary = ID(CollationBinaryID).Get()
-
// SystemCollation is the default collation for the system tables
// such as the information schema. This is still utf8mb3 to match
// MySQLs behavior. This means that you can't use utf8mb4 in table
@@ -218,7 +212,7 @@ var SystemCollation = TypedCollation{
// this mapping will change, so it's important to use this helper so that
// Vitess code has a consistent mapping for the active collations environment.
func (env *Environment) CharsetAlias(charset string) (alias string, ok bool) {
- alias, ok = env.version.charsetAliases()[charset]
+ alias, ok = charsetAliases()[charset]
return
}
@@ -228,10 +222,10 @@ func (env *Environment) CharsetAlias(charset string) (alias string, ok bool) {
// Vitess code has a consistent mapping for the active collations environment.
func (env *Environment) CollationAlias(collation string) (string, bool) {
col := env.LookupByName(collation)
- if col == nil {
+ if col == Unknown {
return collation, false
}
- allCols, ok := globalVersionInfo[col.ID()]
+ allCols, ok := globalVersionInfo[col]
if !ok {
return collation, false
}
@@ -239,7 +233,7 @@ func (env *Environment) CollationAlias(collation string) (string, bool) {
return collation, false
}
for _, alias := range allCols.alias {
- for source, dest := range env.version.charsetAliases() {
+ for source, dest := range charsetAliases() {
if strings.HasPrefix(collation, fmt.Sprintf("%s_", source)) &&
strings.HasPrefix(alias.name, fmt.Sprintf("%s_", dest)) {
return alias.name, true
@@ -256,7 +250,7 @@ func (env *Environment) CollationAlias(collation string) (string, bool) {
// For older MySQL environments, the default charset is `utf8mb4_general_ci`.
func (env *Environment) DefaultConnectionCharset() uint8 {
switch env.version {
- case collverMySQL80:
+ case collverMySQL8:
return uint8(CollationUtf8mb4ID)
default:
return 45
@@ -281,12 +275,29 @@ func (env *Environment) ParseConnectionCharset(csname string) (uint8, error) {
var collid ID = 0
csname = strings.ToLower(csname)
if defaults, ok := env.byCharset[csname]; ok {
- collid = defaults.Default.ID()
+ collid = defaults.Default
} else if coll, ok := env.byName[csname]; ok {
- collid = coll.ID()
+ collid = coll
}
if collid == 0 || collid > 255 {
return 0, fmt.Errorf("unsupported connection charset: %q", csname)
}
return uint8(collid), nil
}
+
+func (env *Environment) AllCollationIDs() []ID {
+ all := make([]ID, 0, len(env.byID))
+ for v := range env.byID {
+ all = append(all, v)
+ }
+ slices.Sort(all)
+ return all
+}
+
+func (env *Environment) LookupByCharset(name string) *colldefaults {
+ return env.byCharset[name]
+}
+
+func (env *Environment) LookupCharsetName(coll ID) string {
+ return env.byCharsetName[coll]
+}
diff --git a/go/mysql/collations/golden_test.go b/go/mysql/collations/golden_test.go
index 32b9e90394f..099f77268b7 100644
--- a/go/mysql/collations/golden_test.go
+++ b/go/mysql/collations/golden_test.go
@@ -1,5 +1,5 @@
/*
-Copyright 2021 The Vitess Authors.
+Copyright 2023 The Vitess Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,71 +17,58 @@ limitations under the License.
package collations
import (
- "bytes"
"fmt"
"os"
- "path/filepath"
"sort"
"strings"
"testing"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-
- "vitess.io/vitess/go/mysql/collations/charset"
- "vitess.io/vitess/go/mysql/collations/internal/testutil"
)
-func TestGoldenWeights(t *testing.T) {
- gllGoldenTests, err := filepath.Glob("testdata/wiki_*.gob.gz")
- if err != nil {
- t.Fatal(err)
+func TestAllCollationsByCharset(t *testing.T) {
+ var defaults1 = map[string][2]string{
+ "utf8mb4": {"utf8mb4_general_ci", "utf8mb4_bin"},
}
-
- for _, goldenPath := range gllGoldenTests {
- golden := &testutil.GoldenTest{}
- if err := golden.DecodeFromFile(goldenPath); err != nil {
- t.Fatal(err)
- }
-
- for _, goldenCase := range golden.Cases {
- t.Run(fmt.Sprintf("%s (%s)", golden.Name, goldenCase.Lang), func(t *testing.T) {
- for coll, expected := range goldenCase.Weights {
- coll := testcollation(t, coll)
-
- input, err := charset.ConvertFromUTF8(nil, coll.Charset(), goldenCase.Text)
- if err != nil {
- t.Fatal(err)
- }
-
- result := coll.WeightString(nil, input, 0)
- assert.True(t, bytes.Equal(expected, result), "mismatch for collation=%s\noriginal: %s\ninput: %#v\nexpected: %v\nactual: %v", coll.Name(), string(goldenCase.Text), input, expected, result)
-
- }
- })
- }
+ var defaults2 = map[string][2]string{
+ "utf8mb4": {"utf8mb4_0900_ai_ci", "utf8mb4_0900_bin"},
}
-}
-func TestCollationsForLanguage(t *testing.T) {
- allCollations := testall()
- langCounts := make(map[testutil.Lang][]string)
+ for _, tc := range []struct {
+ version collver
+ defaults map[string][2]string
+ }{
+ {collverMariaDB100, defaults1},
+ {collverMariaDB101, defaults1},
+ {collverMariaDB102, defaults1},
+ {collverMariaDB103, defaults1},
+ {collverMySQL56, defaults1},
+ {collverMySQL57, defaults1},
+ {collverMySQL8, defaults2},
+ } {
+ t.Run(tc.version.String(), func(t *testing.T) {
+ env := makeEnv(tc.version)
+ for csname, cset := range env.byCharset {
+ switch csname {
+ case "gb18030":
+ // this doesn't work yet
+ continue
+ }
+ require.NotNil(t, cset.Default, "charset %s has no default", csname)
+ require.NotNil(t, cset.Binary, "charset %s has no binary", csname)
- for lang := range testutil.KnownLanguages {
- var matched []string
- for _, coll := range allCollations {
- name := coll.Name()
- if lang.MatchesCollation(name) {
- matched = append(matched, name)
}
- }
- langCounts[lang] = matched
- }
- for lang := range testutil.KnownLanguages {
- assert.NotEqual(t, 0, len(langCounts[lang]), "no collations found for %q", lang)
-
- t.Logf("%s: %v", lang, langCounts[lang])
+ for charset, expected := range tc.defaults {
+ expectedDefault, expectedBinary := expected[0], expected[1]
+ if def := env.DefaultCollationForCharset(charset); env.LookupName(def) != expectedDefault {
+ t.Fatalf("bad default for utf8mb4: %s (expected %s)", env.LookupName(def), expectedDefault)
+ }
+ if def := env.BinaryCollationForCharset(charset); env.LookupName(def) != expectedBinary {
+ t.Fatalf("bad binary for utf8mb4: %s (expected %s)", env.LookupName(def), expectedBinary)
+ }
+ }
+ })
}
}
@@ -89,7 +76,7 @@ func TestCollationsForLanguage(t *testing.T) {
// table with Collation support information for the current build of Vitess.
func XTestSupportTables(t *testing.T) {
var versions = []collver{
- collverMySQL80,
+ collverMySQL8,
collverMySQL57,
collverMySQL56,
collverMariaDB103,
@@ -120,8 +107,8 @@ func XTestSupportTables(t *testing.T) {
fmt.Fprintf(out, " |\n|%s\n", strings.Repeat("---|", len(envs)+2))
for _, id := range all {
- coll := collationsById[id]
- if coll == nil {
+ name := envs[0].LookupName(id)
+ if name == "" {
vdata := globalVersionInfo[id]
var collnames []string
@@ -148,9 +135,9 @@ func XTestSupportTables(t *testing.T) {
}
}
} else {
- fmt.Fprintf(out, "| %s | %s", coll.Name(), coll.Charset().Name())
+ fmt.Fprintf(out, "| %s | %s", name, envs[0].LookupCharsetName(id))
for _, env := range envs {
- _, supported := env.byName[coll.Name()]
+ _, supported := env.LookupID(name)
if supported {
fmt.Fprintf(out, " | ✅")
} else {
@@ -162,49 +149,3 @@ func XTestSupportTables(t *testing.T) {
fmt.Fprintf(out, " |\n")
}
}
-
-func TestAllCollationsByCharset(t *testing.T) {
- var defaults1 = map[string][2]string{
- "utf8mb4": {"utf8mb4_general_ci", "utf8mb4_bin"},
- }
- var defaults2 = map[string][2]string{
- "utf8mb4": {"utf8mb4_0900_ai_ci", "utf8mb4_0900_bin"},
- }
-
- for _, tc := range []struct {
- version collver
- defaults map[string][2]string
- }{
- {collverMariaDB100, defaults1},
- {collverMariaDB101, defaults1},
- {collverMariaDB102, defaults1},
- {collverMariaDB103, defaults1},
- {collverMySQL56, defaults1},
- {collverMySQL57, defaults1},
- {collverMySQL80, defaults2},
- } {
- t.Run(tc.version.String(), func(t *testing.T) {
- env := makeEnv(tc.version)
- for csname, cset := range env.byCharset {
- switch csname {
- case "gb18030":
- // this doesn't work yet
- continue
- }
- require.NotNil(t, cset.Default, "charset %s has no default", csname)
- require.NotNil(t, cset.Binary, "charset %s has no binary", csname)
-
- }
-
- for charset, expected := range tc.defaults {
- expectedDefault, expectedBinary := expected[0], expected[1]
- if def := env.DefaultCollationForCharset(charset); def.Name() != expectedDefault {
- t.Fatalf("bad default for utf8mb4: %s (expected %s)", def.Name(), expectedDefault)
- }
- if def := env.BinaryCollationForCharset(charset); def.Name() != expectedBinary {
- t.Fatalf("bad binary for utf8mb4: %s (expected %s)", def.Name(), expectedBinary)
- }
- }
- })
- }
-}
diff --git a/go/mysql/collations/integration/charset_test.go b/go/mysql/collations/integration/charset_test.go
index 2705dc29f5d..8a4d12a0e4d 100644
--- a/go/mysql/collations/integration/charset_test.go
+++ b/go/mysql/collations/integration/charset_test.go
@@ -23,6 +23,8 @@ import (
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/mysql/collations/colldata"
+
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/collations/charset"
"vitess.io/vitess/go/mysql/collations/remote"
@@ -45,7 +47,7 @@ func TestLocalEncodings(t *testing.T) {
for _, tc := range cases {
local := collations.Local().LookupByName(tc.collation)
remote := remote.NewCollation(conn, tc.collation)
- verifyTranscoding(t, local, remote, tc.input)
+ verifyTranscoding(t, colldata.Lookup(local), remote, tc.input)
}
}
diff --git a/go/mysql/collations/integration/coercion_test.go b/go/mysql/collations/integration/coercion_test.go
index 7ad31f78852..dad55bcafad 100644
--- a/go/mysql/collations/integration/coercion_test.go
+++ b/go/mysql/collations/integration/coercion_test.go
@@ -26,6 +26,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/mysql/collations/colldata"
+
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/collations/remote"
"vitess.io/vitess/go/sqltypes"
@@ -33,18 +35,18 @@ import (
type TextWithCollation struct {
Text []byte
- Collation collations.Collation
+ Collation collations.ID
}
type RemoteCoercionResult struct {
Expr sqltypes.Value
- Collation collations.Collation
+ Collation collations.ID
Coercibility collations.Coercibility
}
type RemoteCoercionTest interface {
Expression() string
- Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coerce1, coerce2 collations.Coercion)
+ Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coerce1, coerce2 colldata.Coercion)
}
type testConcat struct {
@@ -52,15 +54,17 @@ type testConcat struct {
}
func (tc *testConcat) Expression() string {
+ env := collations.Local()
return fmt.Sprintf("CONCAT((_%s X'%x' COLLATE %q), (_%s X'%x' COLLATE %q))",
- tc.left.Collation.Charset().Name(), tc.left.Text, tc.left.Collation.Name(),
- tc.right.Collation.Charset().Name(), tc.right.Text, tc.right.Collation.Name(),
+ colldata.Lookup(tc.left.Collation).Charset().Name(), tc.left.Text, env.LookupName(tc.left.Collation),
+ colldata.Lookup(tc.right.Collation).Charset().Name(), tc.right.Text, env.LookupName(tc.right.Collation),
)
}
-func (tc *testConcat) Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coercion1, coercion2 collations.Coercion) {
- localCollation := local.Collation.Get()
- assert.Equal(t, remote.Collation.Name(), localCollation.Name(), "bad collation resolved: local is %s, remote is %s", localCollation.Name(), remote.Collation.Name())
+func (tc *testConcat) Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coercion1, coercion2 colldata.Coercion) {
+ localCollation := colldata.Lookup(local.Collation)
+ remoteName := collations.Local().LookupName(remote.Collation)
+ assert.Equal(t, remoteName, localCollation.Name(), "bad collation resolved: local is %s, remote is %s", localCollation.Name(), remoteName)
assert.Equal(t, remote.Coercibility, local.Coercibility, "bad coercibility resolved: local is %d, remote is %d", local.Coercibility, remote.Coercibility)
leftText, err := coercion1(nil, tc.left.Text)
@@ -81,8 +85,8 @@ func (tc *testConcat) Test(t *testing.T, remote *RemoteCoercionResult, local col
rEBytes, err := remote.Expr.ToBytes()
require.NoError(t, err)
- assert.True(t, bytes.Equal(concat.Bytes(), rEBytes), "failed to concatenate text;\n\tCONCAT(%v COLLATE %s, %v COLLATE %s) = \n\tCONCAT(%v, %v) COLLATE %s = \n\t\t%v\n\n\texpected: %v", tc.left.Text, tc.left.Collation.Name(),
- tc.right.Text, tc.right.Collation.Name(), leftText, rightText, localCollation.Name(),
+ assert.True(t, bytes.Equal(concat.Bytes(), rEBytes), "failed to concatenate text;\n\tCONCAT(%v COLLATE %s, %v COLLATE %s) = \n\tCONCAT(%v, %v) COLLATE %s = \n\t\t%v\n\n\texpected: %v", tc.left.Text, collations.Local().LookupName(tc.left.Collation),
+ tc.right.Text, collations.Local().LookupName(tc.right.Collation), leftText, rightText, localCollation.Name(),
concat.Bytes(), rEBytes)
}
@@ -92,14 +96,15 @@ type testComparison struct {
}
func (tc *testComparison) Expression() string {
+ env := collations.Local()
return fmt.Sprintf("(_%s X'%x' COLLATE %q) = (_%s X'%x' COLLATE %q)",
- tc.left.Collation.Charset().Name(), tc.left.Text, tc.left.Collation.Name(),
- tc.right.Collation.Charset().Name(), tc.right.Text, tc.right.Collation.Name(),
+ env.LookupCharsetName(tc.left.Collation), tc.left.Text, env.LookupName(tc.left.Collation),
+ env.LookupCharsetName(tc.right.Collation), tc.right.Text, env.LookupName(tc.right.Collation),
)
}
-func (tc *testComparison) Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coerce1, coerce2 collations.Coercion) {
- localCollation := local.Collation.Get()
+func (tc *testComparison) Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coerce1, coerce2 colldata.Coercion) {
+ localCollation := colldata.Lookup(local.Collation)
leftText, err := coerce1(nil, tc.left.Text)
if err != nil {
t.Errorf("failed to transcode left: %v", err)
@@ -130,12 +135,12 @@ func TestComparisonSemantics(t *testing.T) {
t.Skipf("The behavior of Coercion Semantics is not correct before 8.0.31")
}
- for _, coll := range collations.Local().AllCollations() {
+ for _, coll := range colldata.All(collations.Local()) {
text := verifyTranscoding(t, coll, remote.NewCollation(conn, coll.Name()), []byte(BaseString))
- testInputs = append(testInputs, &TextWithCollation{Text: text, Collation: coll})
+ testInputs = append(testInputs, &TextWithCollation{Text: text, Collation: coll.ID()})
}
sort.Slice(testInputs, func(i, j int) bool {
- return testInputs[i].Collation.ID() < testInputs[j].Collation.ID()
+ return testInputs[i].Collation < testInputs[j].Collation
})
var testCases = []struct {
@@ -161,17 +166,17 @@ func TestComparisonSemantics(t *testing.T) {
for _, collA := range testInputs {
for _, collB := range testInputs {
left := collations.TypedCollation{
- Collation: collA.Collation.ID(),
+ Collation: collA.Collation,
Coercibility: 0,
Repertoire: collations.RepertoireASCII,
}
right := collations.TypedCollation{
- Collation: collB.Collation.ID(),
+ Collation: collB.Collation,
Coercibility: 0,
Repertoire: collations.RepertoireASCII,
}
- resultLocal, coercionLocal1, coercionLocal2, errLocal := collations.Local().MergeCollations(left, right,
- collations.CoercionOptions{
+ resultLocal, coercionLocal1, coercionLocal2, errLocal := colldata.Merge(collations.Local(), left, right,
+ colldata.CoercionOptions{
ConvertToSuperset: true,
ConvertWithCoercion: true,
})
@@ -189,11 +194,12 @@ func TestComparisonSemantics(t *testing.T) {
query := fmt.Sprintf("SELECT CAST((%s) AS BINARY), COLLATION(%s), COERCIBILITY(%s)", expr, expr, expr)
resultRemote, errRemote := conn.ExecuteFetch(query, 1, false)
+ env := collations.Local()
if errRemote != nil {
require.True(t, strings.Contains(errRemote.Error(), "Illegal mix of collations"), "query %s failed: %v", query, errRemote)
if errLocal == nil {
- t.Errorf("expected %s vs %s to fail coercion: %v", collA.Collation.Name(), collB.Collation.Name(), errRemote)
+ t.Errorf("expected %s vs %s to fail coercion: %v", env.LookupName(collA.Collation), env.LookupName(collB.Collation), errRemote)
continue
}
require.True(t, strings.HasPrefix(normalizeCollationInError(errRemote.Error()), normalizeCollationInError(errLocal.Error())), "bad error message: expected %q, got %q", errRemote, errLocal)
@@ -202,7 +208,7 @@ func TestComparisonSemantics(t *testing.T) {
}
if errLocal != nil {
- t.Errorf("expected %s vs %s to coerce, but they failed: %v", collA.Collation.Name(), collB.Collation.Name(), errLocal)
+ t.Errorf("expected %s vs %s to coerce, but they failed: %v", env.LookupName(collA.Collation), env.LookupName(collB.Collation), errLocal)
continue
}
diff --git a/go/mysql/collations/integration/collations_test.go b/go/mysql/collations/integration/collations_test.go
index 32ffb81a498..3b33e23e2d3 100644
--- a/go/mysql/collations/integration/collations_test.go
+++ b/go/mysql/collations/integration/collations_test.go
@@ -31,6 +31,8 @@ import (
"github.com/stretchr/testify/require"
"golang.org/x/text/encoding/unicode/utf32"
+ "vitess.io/vitess/go/mysql/collations/colldata"
+
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/collations/remote"
@@ -140,7 +142,7 @@ func (u *uca900CollationTest) Test(t *testing.T, result *sqltypes.Result) {
continue
}
- weightString := coll.WeightString(make([]byte, 0, 128), utf8Input, 0)
+ weightString := colldata.Lookup(coll).WeightString(make([]byte, 0, 128), utf8Input, 0)
if !bytes.Equal(weightString, expectedWeightString) {
t.Errorf("[%s] mismatch for %s (%v): \n\twant: %v\n\tgot: %v", u.collation, row[2].ToString(), utf8Input, expectedWeightString, weightString)
errors++
@@ -227,7 +229,7 @@ func TestCollationWithSpace(t *testing.T) {
remote := remote.NewCollation(conn, collName)
for _, size := range []int{0, codepoints, codepoints + 1, codepoints + 2, 20, 32} {
- localWeight := local.WeightString(nil, []byte(ExampleString), size)
+ localWeight := colldata.Lookup(local).WeightString(nil, []byte(ExampleString), size)
remoteWeight := remote.WeightString(nil, []byte(ExampleString), size)
require.True(t, bytes.Equal(localWeight, remoteWeight), "mismatch at len=%d\ninput: %#v\nexpected: %#v\nactual: %#v", size, []byte(ExampleString), remoteWeight, localWeight)
diff --git a/go/mysql/collations/integration/helpers_test.go b/go/mysql/collations/integration/helpers_test.go
index 95410fbb74a..d436280f04b 100644
--- a/go/mysql/collations/integration/helpers_test.go
+++ b/go/mysql/collations/integration/helpers_test.go
@@ -27,6 +27,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/mysql/collations/colldata"
+
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/collations/charset"
@@ -52,7 +54,7 @@ func testRemoteWeights(t *testing.T, golden io.Writer, cases []testweight) {
t.Run(tc.collation, func(t *testing.T) {
local := collations.Local().LookupByName(tc.collation)
remote := remote.NewCollation(conn, tc.collation)
- localResult := local.WeightString(nil, tc.input, 0)
+ localResult := colldata.Lookup(local).WeightString(nil, tc.input, 0)
remoteResult := remote.WeightString(nil, tc.input, 0)
if err := remote.LastError(); err != nil {
@@ -85,7 +87,7 @@ func testRemoteComparison(t *testing.T, golden io.Writer, cases []testcmp) {
t.Run(tc.collation, func(t *testing.T) {
local := collations.Local().LookupByName(tc.collation)
remote := remote.NewCollation(conn, tc.collation)
- localResult := normalizecmp(local.Collate(tc.left, tc.right, false))
+ localResult := normalizecmp(colldata.Lookup(local).Collate(tc.left, tc.right, false))
remoteResult := remote.Collate(tc.left, tc.right, false)
if err := remote.LastError(); err != nil {
@@ -101,7 +103,7 @@ func testRemoteComparison(t *testing.T, golden io.Writer, cases []testcmp) {
}
}
-func verifyTranscoding(t *testing.T, local collations.Collation, remote *remote.Collation, text []byte) []byte {
+func verifyTranscoding(t *testing.T, local colldata.Collation, remote *remote.Collation, text []byte) []byte {
transRemote, err := charset.ConvertFromUTF8(nil, remote.Charset(), text)
require.NoError(t, err, "remote transcoding failed: %v", err)
@@ -112,7 +114,7 @@ func verifyTranscoding(t *testing.T, local collations.Collation, remote *remote.
return transLocal
}
-func verifyWeightString(t *testing.T, local collations.Collation, remote *remote.Collation, text []byte) {
+func verifyWeightString(t *testing.T, local colldata.Collation, remote *remote.Collation, text []byte) {
localResult := local.WeightString(nil, text, 0)
remoteResult := remote.WeightString(nil, text, 0)
diff --git a/go/mysql/collations/integration/weight_string_test.go b/go/mysql/collations/integration/weight_string_test.go
index c93a9ed586e..170da4f5987 100644
--- a/go/mysql/collations/integration/weight_string_test.go
+++ b/go/mysql/collations/integration/weight_string_test.go
@@ -25,6 +25,7 @@ import (
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/collations/charset"
+ "vitess.io/vitess/go/mysql/collations/colldata"
"vitess.io/vitess/go/mysql/collations/internal/testutil"
"vitess.io/vitess/go/mysql/collations/remote"
)
@@ -46,7 +47,7 @@ func TestFastIterators(t *testing.T) {
func TestWeightStringsComprehensive(t *testing.T) {
type collationsForCharset struct {
charset charset.Charset
- locals []collations.Collation
+ locals []colldata.Collation
remotes []*remote.Collation
}
var charsetMap = make(map[string]*collationsForCharset)
@@ -59,7 +60,7 @@ func TestWeightStringsComprehensive(t *testing.T) {
conn := mysqlconn(t)
defer conn.Close()
- allCollations := collations.Local().AllCollations()
+ allCollations := colldata.All(collations.Local())
sort.Slice(allCollations, func(i, j int) bool {
return allCollations[i].ID() < allCollations[j].ID()
})
@@ -103,16 +104,16 @@ func TestCJKWeightStrings(t *testing.T) {
conn := mysqlconn(t)
defer conn.Close()
- allCollations := collations.Local().AllCollations()
+ allCollations := colldata.All(collations.Local())
testdata, _ := filepath.Glob("../internal/charset/testdata/*.txt")
for _, testfile := range testdata {
- charset := filepath.Base(testfile)
- charset = strings.TrimSuffix(charset, ".txt")
- charset = charset[strings.LastIndexByte(charset, '-')+1:]
+ cs := filepath.Base(testfile)
+ cs = strings.TrimSuffix(cs, ".txt")
+ cs = cs[strings.LastIndexByte(cs, '-')+1:]
- var valid []collations.Collation
+ var valid []colldata.Collation
for _, coll := range allCollations {
- if coll.Charset().Name() == charset {
+ if coll.Charset().Name() == cs {
valid = append(valid, coll)
t.Logf("%s -> %s", testfile, coll.Name())
}
diff --git a/go/mysql/collations/integration/wildcard_test.go b/go/mysql/collations/integration/wildcard_test.go
index a848e5b7867..6475a35dd21 100644
--- a/go/mysql/collations/integration/wildcard_test.go
+++ b/go/mysql/collations/integration/wildcard_test.go
@@ -22,6 +22,7 @@ import (
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/collations/charset"
+ "vitess.io/vitess/go/mysql/collations/colldata"
"vitess.io/vitess/go/mysql/collations/remote"
)
@@ -78,7 +79,7 @@ func TestRemoteWildcardMatches(t *testing.T) {
{"Ǎḅeçd", "a%bd"},
}
- for _, local := range collations.Local().AllCollations() {
+ for _, local := range colldata.All(collations.Local()) {
t.Run(local.Name(), func(t *testing.T) {
var remote = remote.NewCollation(conn, local.Name())
var err error
diff --git a/go/mysql/collations/internal/uca/contractions.go b/go/mysql/collations/internal/uca/contractions.go
index c4ff99d42e2..d894b0e206e 100644
--- a/go/mysql/collations/internal/uca/contractions.go
+++ b/go/mysql/collations/internal/uca/contractions.go
@@ -18,7 +18,6 @@ package uca
import (
"fmt"
- "unicode/utf8"
"vitess.io/vitess/go/mysql/collations/charset"
)
@@ -28,19 +27,6 @@ type trie struct {
weights []uint16
}
-func (t *trie) walkUTF8(remainder []byte) ([]uint16, []byte) {
- if len(remainder) > 0 {
- cp, width := utf8.DecodeRune(remainder)
- if cp == utf8.RuneError && width < 3 {
- return nil, nil
- }
- if ch := t.children[cp]; ch != nil {
- return ch.walkUTF8(remainder[width:])
- }
- }
- return t.weights, remainder
-}
-
func (t *trie) walkCharset(cs charset.Charset, remainder []byte, depth int) ([]uint16, []byte, int) {
if len(remainder) > 0 {
cp, width := cs.DecodeRune(remainder)
diff --git a/go/mysql/collations/internal/uca/fasttables.go b/go/mysql/collations/internal/uca/fasttables.go
index 1995a78a664..40f3718babe 100644
--- a/go/mysql/collations/internal/uca/fasttables.go
+++ b/go/mysql/collations/internal/uca/fasttables.go
@@ -1,3 +1,19 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
// Code generated by makecolldata DO NOT EDIT
package uca
diff --git a/go/mysql/collations/internal/uca/layout.go b/go/mysql/collations/internal/uca/layout.go
index a5ee45a0ece..35a2749eb21 100644
--- a/go/mysql/collations/internal/uca/layout.go
+++ b/go/mysql/collations/internal/uca/layout.go
@@ -17,7 +17,6 @@ limitations under the License.
package uca
import (
- "reflect"
"sync"
"unsafe"
)
@@ -287,29 +286,29 @@ func (Layout_uca_legacy) applyPatches(page []uint16, offset int, weights []uint1
}
type tableWithPatch struct {
- tableptr uintptr
- patchptr uintptr
+ tableptr unsafe.Pointer
+ patchptr unsafe.Pointer
}
var cachedTables = make(map[tableWithPatch]Weights)
var cachedTablesMu sync.Mutex
func lookupCachedTable(table Weights, patch []Patch) (Weights, bool) {
- hdr1 := (*reflect.SliceHeader)(unsafe.Pointer(&table))
- hdr2 := (*reflect.SliceHeader)(unsafe.Pointer(&patch))
+ data1 := unsafe.Pointer(unsafe.SliceData(table))
+ data2 := unsafe.Pointer(unsafe.SliceData(patch))
cachedTablesMu.Lock()
defer cachedTablesMu.Unlock()
- tbl, ok := cachedTables[tableWithPatch{hdr1.Data, hdr2.Data}]
+ tbl, ok := cachedTables[tableWithPatch{tableptr: data1, patchptr: data2}]
return tbl, ok
}
func storeCachedTable(table Weights, patch []Patch, result Weights) {
- hdr1 := (*reflect.SliceHeader)(unsafe.Pointer(&table))
- hdr2 := (*reflect.SliceHeader)(unsafe.Pointer(&patch))
+ data1 := unsafe.Pointer(unsafe.SliceData(table))
+ data2 := unsafe.Pointer(unsafe.SliceData(patch))
cachedTablesMu.Lock()
- cachedTables[tableWithPatch{hdr1.Data, hdr2.Data}] = result
+ cachedTables[tableWithPatch{tableptr: data1, patchptr: data2}] = result
cachedTablesMu.Unlock()
}
diff --git a/go/mysql/collations/mysqlversion.go b/go/mysql/collations/mysqlversion.go
index 2a1409fbb7e..93d1add9b6a 100644
--- a/go/mysql/collations/mysqlversion.go
+++ b/go/mysql/collations/mysqlversion.go
@@ -1,11 +1,28 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
// Code generated by makecolldata DO NOT EDIT
package collations
type collver byte
type collalias struct {
- mask collver
- name string
+ mask collver
+ name string
+ charset string
}
const (
@@ -16,7 +33,7 @@ const (
collverMariaDB103 collver = 1 << 3
collverMySQL56 collver = 1 << 4
collverMySQL57 collver = 1 << 5
- collverMySQL80 collver = 1 << 6
+ collverMySQL8 collver = 1 << 6
)
func (v collver) String() string {
@@ -35,405 +52,405 @@ func (v collver) String() string {
return "MySQL 5.6"
case collverMySQL57:
return "MySQL 5.7"
- case collverMySQL80:
- return "MySQL 8.0"
+ case collverMySQL8:
+ return "MySQL 0.8"
default:
panic("invalid version identifier")
}
}
-func (v collver) charsetAliases() map[string]string { return map[string]string{"utf8": "utf8mb3"} }
+func charsetAliases() map[string]string { return map[string]string{"utf8": "utf8mb3"} }
var globalVersionInfo = map[ID]struct {
alias []collalias
isdefault collver
}{
- 1: {alias: []collalias{{0b01111111, "big5_chinese_ci"}}, isdefault: 0b01111111},
- 2: {alias: []collalias{{0b01111111, "latin2_czech_cs"}}, isdefault: 0b00000000},
- 3: {alias: []collalias{{0b01111111, "dec8_swedish_ci"}}, isdefault: 0b01111111},
- 4: {alias: []collalias{{0b01111111, "cp850_general_ci"}}, isdefault: 0b01111111},
- 5: {alias: []collalias{{0b01111111, "latin1_german1_ci"}}, isdefault: 0b00000000},
- 6: {alias: []collalias{{0b01111111, "hp8_english_ci"}}, isdefault: 0b01111111},
- 7: {alias: []collalias{{0b01111111, "koi8r_general_ci"}}, isdefault: 0b01111111},
- 8: {alias: []collalias{{0b01111111, "latin1_swedish_ci"}}, isdefault: 0b01111111},
- 9: {alias: []collalias{{0b01111111, "latin2_general_ci"}}, isdefault: 0b01111111},
- 10: {alias: []collalias{{0b01111111, "swe7_swedish_ci"}}, isdefault: 0b01111111},
- 11: {alias: []collalias{{0b01111111, "ascii_general_ci"}}, isdefault: 0b01111111},
- 12: {alias: []collalias{{0b01111111, "ujis_japanese_ci"}}, isdefault: 0b01111111},
- 13: {alias: []collalias{{0b01111111, "sjis_japanese_ci"}}, isdefault: 0b01111111},
- 14: {alias: []collalias{{0b01111111, "cp1251_bulgarian_ci"}}, isdefault: 0b00000000},
- 15: {alias: []collalias{{0b01111111, "latin1_danish_ci"}}, isdefault: 0b00000000},
- 16: {alias: []collalias{{0b01111111, "hebrew_general_ci"}}, isdefault: 0b01111111},
- 18: {alias: []collalias{{0b01111111, "tis620_thai_ci"}}, isdefault: 0b01111111},
- 19: {alias: []collalias{{0b01111111, "euckr_korean_ci"}}, isdefault: 0b01111111},
- 20: {alias: []collalias{{0b01111111, "latin7_estonian_cs"}}, isdefault: 0b00000000},
- 21: {alias: []collalias{{0b01111111, "latin2_hungarian_ci"}}, isdefault: 0b00000000},
- 22: {alias: []collalias{{0b01111111, "koi8u_general_ci"}}, isdefault: 0b01111111},
- 23: {alias: []collalias{{0b01111111, "cp1251_ukrainian_ci"}}, isdefault: 0b00000000},
- 24: {alias: []collalias{{0b01111111, "gb2312_chinese_ci"}}, isdefault: 0b01111111},
- 25: {alias: []collalias{{0b01111111, "greek_general_ci"}}, isdefault: 0b01111111},
- 26: {alias: []collalias{{0b01111111, "cp1250_general_ci"}}, isdefault: 0b01111111},
- 27: {alias: []collalias{{0b01111111, "latin2_croatian_ci"}}, isdefault: 0b00000000},
- 28: {alias: []collalias{{0b01111111, "gbk_chinese_ci"}}, isdefault: 0b01111111},
- 29: {alias: []collalias{{0b01111111, "cp1257_lithuanian_ci"}}, isdefault: 0b00000000},
- 30: {alias: []collalias{{0b01111111, "latin5_turkish_ci"}}, isdefault: 0b01111111},
- 31: {alias: []collalias{{0b01111111, "latin1_german2_ci"}}, isdefault: 0b00000000},
- 32: {alias: []collalias{{0b01111111, "armscii8_general_ci"}}, isdefault: 0b01111111},
- 33: {alias: []collalias{{0b01111111, "utf8_general_ci"}, {0b01111111, "utf8mb3_general_ci"}}, isdefault: 0b01111111},
- 34: {alias: []collalias{{0b01111111, "cp1250_czech_cs"}}, isdefault: 0b00000000},
- 35: {alias: []collalias{{0b01111111, "ucs2_general_ci"}}, isdefault: 0b01111111},
- 36: {alias: []collalias{{0b01111111, "cp866_general_ci"}}, isdefault: 0b01111111},
- 37: {alias: []collalias{{0b01111111, "keybcs2_general_ci"}}, isdefault: 0b01111111},
- 38: {alias: []collalias{{0b01111111, "macce_general_ci"}}, isdefault: 0b01111111},
- 39: {alias: []collalias{{0b01111111, "macroman_general_ci"}}, isdefault: 0b01111111},
- 40: {alias: []collalias{{0b01111111, "cp852_general_ci"}}, isdefault: 0b01111111},
- 41: {alias: []collalias{{0b01111111, "latin7_general_ci"}}, isdefault: 0b01111111},
- 42: {alias: []collalias{{0b01111111, "latin7_general_cs"}}, isdefault: 0b00000000},
- 43: {alias: []collalias{{0b01111111, "macce_bin"}}, isdefault: 0b00000000},
- 44: {alias: []collalias{{0b01111111, "cp1250_croatian_ci"}}, isdefault: 0b00000000},
- 45: {alias: []collalias{{0b01111111, "utf8mb4_general_ci"}}, isdefault: 0b00111111},
- 46: {alias: []collalias{{0b01111111, "utf8mb4_bin"}}, isdefault: 0b00000000},
- 47: {alias: []collalias{{0b01111111, "latin1_bin"}}, isdefault: 0b00000000},
- 48: {alias: []collalias{{0b01111111, "latin1_general_ci"}}, isdefault: 0b00000000},
- 49: {alias: []collalias{{0b01111111, "latin1_general_cs"}}, isdefault: 0b00000000},
- 50: {alias: []collalias{{0b01111111, "cp1251_bin"}}, isdefault: 0b00000000},
- 51: {alias: []collalias{{0b01111111, "cp1251_general_ci"}}, isdefault: 0b01111111},
- 52: {alias: []collalias{{0b01111111, "cp1251_general_cs"}}, isdefault: 0b00000000},
- 53: {alias: []collalias{{0b01111111, "macroman_bin"}}, isdefault: 0b00000000},
- 54: {alias: []collalias{{0b01111111, "utf16_general_ci"}}, isdefault: 0b01111111},
- 55: {alias: []collalias{{0b01111111, "utf16_bin"}}, isdefault: 0b00000000},
- 56: {alias: []collalias{{0b01111111, "utf16le_general_ci"}}, isdefault: 0b01111111},
- 57: {alias: []collalias{{0b01111111, "cp1256_general_ci"}}, isdefault: 0b01111111},
- 58: {alias: []collalias{{0b01111111, "cp1257_bin"}}, isdefault: 0b00000000},
- 59: {alias: []collalias{{0b01111111, "cp1257_general_ci"}}, isdefault: 0b01111111},
- 60: {alias: []collalias{{0b01111111, "utf32_general_ci"}}, isdefault: 0b01111111},
- 61: {alias: []collalias{{0b01111111, "utf32_bin"}}, isdefault: 0b00000000},
- 62: {alias: []collalias{{0b01111111, "utf16le_bin"}}, isdefault: 0b00000000},
- 63: {alias: []collalias{{0b01111111, "binary"}}, isdefault: 0b01111111},
- 64: {alias: []collalias{{0b01111111, "armscii8_bin"}}, isdefault: 0b00000000},
- 65: {alias: []collalias{{0b01111111, "ascii_bin"}}, isdefault: 0b00000000},
- 66: {alias: []collalias{{0b01111111, "cp1250_bin"}}, isdefault: 0b00000000},
- 67: {alias: []collalias{{0b01111111, "cp1256_bin"}}, isdefault: 0b00000000},
- 68: {alias: []collalias{{0b01111111, "cp866_bin"}}, isdefault: 0b00000000},
- 69: {alias: []collalias{{0b01111111, "dec8_bin"}}, isdefault: 0b00000000},
- 70: {alias: []collalias{{0b01111111, "greek_bin"}}, isdefault: 0b00000000},
- 71: {alias: []collalias{{0b01111111, "hebrew_bin"}}, isdefault: 0b00000000},
- 72: {alias: []collalias{{0b01111111, "hp8_bin"}}, isdefault: 0b00000000},
- 73: {alias: []collalias{{0b01111111, "keybcs2_bin"}}, isdefault: 0b00000000},
- 74: {alias: []collalias{{0b01111111, "koi8r_bin"}}, isdefault: 0b00000000},
- 75: {alias: []collalias{{0b01111111, "koi8u_bin"}}, isdefault: 0b00000000},
- 76: {alias: []collalias{{0b01000000, "utf8_tolower_ci"}, {0b01000000, "utf8mb3_tolower_ci"}}, isdefault: 0b00000000},
- 77: {alias: []collalias{{0b01111111, "latin2_bin"}}, isdefault: 0b00000000},
- 78: {alias: []collalias{{0b01111111, "latin5_bin"}}, isdefault: 0b00000000},
- 79: {alias: []collalias{{0b01111111, "latin7_bin"}}, isdefault: 0b00000000},
- 80: {alias: []collalias{{0b01111111, "cp850_bin"}}, isdefault: 0b00000000},
- 81: {alias: []collalias{{0b01111111, "cp852_bin"}}, isdefault: 0b00000000},
- 82: {alias: []collalias{{0b01111111, "swe7_bin"}}, isdefault: 0b00000000},
- 83: {alias: []collalias{{0b01111111, "utf8_bin"}, {0b01111111, "utf8mb3_bin"}}, isdefault: 0b00000000},
- 84: {alias: []collalias{{0b01111111, "big5_bin"}}, isdefault: 0b00000000},
- 85: {alias: []collalias{{0b01111111, "euckr_bin"}}, isdefault: 0b00000000},
- 86: {alias: []collalias{{0b01111111, "gb2312_bin"}}, isdefault: 0b00000000},
- 87: {alias: []collalias{{0b01111111, "gbk_bin"}}, isdefault: 0b00000000},
- 88: {alias: []collalias{{0b01111111, "sjis_bin"}}, isdefault: 0b00000000},
- 89: {alias: []collalias{{0b01111111, "tis620_bin"}}, isdefault: 0b00000000},
- 90: {alias: []collalias{{0b01111111, "ucs2_bin"}}, isdefault: 0b00000000},
- 91: {alias: []collalias{{0b01111111, "ujis_bin"}}, isdefault: 0b00000000},
- 92: {alias: []collalias{{0b01111111, "geostd8_general_ci"}}, isdefault: 0b01111111},
- 93: {alias: []collalias{{0b01111111, "geostd8_bin"}}, isdefault: 0b00000000},
- 94: {alias: []collalias{{0b01111111, "latin1_spanish_ci"}}, isdefault: 0b00000000},
- 95: {alias: []collalias{{0b01111111, "cp932_japanese_ci"}}, isdefault: 0b01111111},
- 96: {alias: []collalias{{0b01111111, "cp932_bin"}}, isdefault: 0b00000000},
- 97: {alias: []collalias{{0b01111111, "eucjpms_japanese_ci"}}, isdefault: 0b01111111},
- 98: {alias: []collalias{{0b01111111, "eucjpms_bin"}}, isdefault: 0b00000000},
- 99: {alias: []collalias{{0b01111111, "cp1250_polish_ci"}}, isdefault: 0b00000000},
- 101: {alias: []collalias{{0b01111111, "utf16_unicode_ci"}}, isdefault: 0b00000000},
- 102: {alias: []collalias{{0b01111111, "utf16_icelandic_ci"}}, isdefault: 0b00000000},
- 103: {alias: []collalias{{0b01111111, "utf16_latvian_ci"}}, isdefault: 0b00000000},
- 104: {alias: []collalias{{0b01111111, "utf16_romanian_ci"}}, isdefault: 0b00000000},
- 105: {alias: []collalias{{0b01111111, "utf16_slovenian_ci"}}, isdefault: 0b00000000},
- 106: {alias: []collalias{{0b01111111, "utf16_polish_ci"}}, isdefault: 0b00000000},
- 107: {alias: []collalias{{0b01111111, "utf16_estonian_ci"}}, isdefault: 0b00000000},
- 108: {alias: []collalias{{0b01111111, "utf16_spanish_ci"}}, isdefault: 0b00000000},
- 109: {alias: []collalias{{0b01111111, "utf16_swedish_ci"}}, isdefault: 0b00000000},
- 110: {alias: []collalias{{0b01111111, "utf16_turkish_ci"}}, isdefault: 0b00000000},
- 111: {alias: []collalias{{0b01111111, "utf16_czech_ci"}}, isdefault: 0b00000000},
- 112: {alias: []collalias{{0b01111111, "utf16_danish_ci"}}, isdefault: 0b00000000},
- 113: {alias: []collalias{{0b01111111, "utf16_lithuanian_ci"}}, isdefault: 0b00000000},
- 114: {alias: []collalias{{0b01111111, "utf16_slovak_ci"}}, isdefault: 0b00000000},
- 115: {alias: []collalias{{0b01111111, "utf16_spanish2_ci"}}, isdefault: 0b00000000},
- 116: {alias: []collalias{{0b01111111, "utf16_roman_ci"}}, isdefault: 0b00000000},
- 117: {alias: []collalias{{0b01111111, "utf16_persian_ci"}}, isdefault: 0b00000000},
- 118: {alias: []collalias{{0b01111111, "utf16_esperanto_ci"}}, isdefault: 0b00000000},
- 119: {alias: []collalias{{0b01111111, "utf16_hungarian_ci"}}, isdefault: 0b00000000},
- 120: {alias: []collalias{{0b01111111, "utf16_sinhala_ci"}}, isdefault: 0b00000000},
- 121: {alias: []collalias{{0b01111111, "utf16_german2_ci"}}, isdefault: 0b00000000},
- 122: {alias: []collalias{{0b01110000, "utf16_croatian_ci"}, {0b00001111, "utf16_croatian_mysql561_ci"}}, isdefault: 0b00000000},
- 123: {alias: []collalias{{0b01111111, "utf16_unicode_520_ci"}}, isdefault: 0b00000000},
- 124: {alias: []collalias{{0b01111111, "utf16_vietnamese_ci"}}, isdefault: 0b00000000},
- 128: {alias: []collalias{{0b01111111, "ucs2_unicode_ci"}}, isdefault: 0b00000000},
- 129: {alias: []collalias{{0b01111111, "ucs2_icelandic_ci"}}, isdefault: 0b00000000},
- 130: {alias: []collalias{{0b01111111, "ucs2_latvian_ci"}}, isdefault: 0b00000000},
- 131: {alias: []collalias{{0b01111111, "ucs2_romanian_ci"}}, isdefault: 0b00000000},
- 132: {alias: []collalias{{0b01111111, "ucs2_slovenian_ci"}}, isdefault: 0b00000000},
- 133: {alias: []collalias{{0b01111111, "ucs2_polish_ci"}}, isdefault: 0b00000000},
- 134: {alias: []collalias{{0b01111111, "ucs2_estonian_ci"}}, isdefault: 0b00000000},
- 135: {alias: []collalias{{0b01111111, "ucs2_spanish_ci"}}, isdefault: 0b00000000},
- 136: {alias: []collalias{{0b01111111, "ucs2_swedish_ci"}}, isdefault: 0b00000000},
- 137: {alias: []collalias{{0b01111111, "ucs2_turkish_ci"}}, isdefault: 0b00000000},
- 138: {alias: []collalias{{0b01111111, "ucs2_czech_ci"}}, isdefault: 0b00000000},
- 139: {alias: []collalias{{0b01111111, "ucs2_danish_ci"}}, isdefault: 0b00000000},
- 140: {alias: []collalias{{0b01111111, "ucs2_lithuanian_ci"}}, isdefault: 0b00000000},
- 141: {alias: []collalias{{0b01111111, "ucs2_slovak_ci"}}, isdefault: 0b00000000},
- 142: {alias: []collalias{{0b01111111, "ucs2_spanish2_ci"}}, isdefault: 0b00000000},
- 143: {alias: []collalias{{0b01111111, "ucs2_roman_ci"}}, isdefault: 0b00000000},
- 144: {alias: []collalias{{0b01111111, "ucs2_persian_ci"}}, isdefault: 0b00000000},
- 145: {alias: []collalias{{0b01111111, "ucs2_esperanto_ci"}}, isdefault: 0b00000000},
- 146: {alias: []collalias{{0b01111111, "ucs2_hungarian_ci"}}, isdefault: 0b00000000},
- 147: {alias: []collalias{{0b01111111, "ucs2_sinhala_ci"}}, isdefault: 0b00000000},
- 148: {alias: []collalias{{0b01111111, "ucs2_german2_ci"}}, isdefault: 0b00000000},
- 149: {alias: []collalias{{0b01110000, "ucs2_croatian_ci"}, {0b00001111, "ucs2_croatian_mysql561_ci"}}, isdefault: 0b00000000},
- 150: {alias: []collalias{{0b01111111, "ucs2_unicode_520_ci"}}, isdefault: 0b00000000},
- 151: {alias: []collalias{{0b01111111, "ucs2_vietnamese_ci"}}, isdefault: 0b00000000},
- 159: {alias: []collalias{{0b01111111, "ucs2_general_mysql500_ci"}}, isdefault: 0b00000000},
- 160: {alias: []collalias{{0b01111111, "utf32_unicode_ci"}}, isdefault: 0b00000000},
- 161: {alias: []collalias{{0b01111111, "utf32_icelandic_ci"}}, isdefault: 0b00000000},
- 162: {alias: []collalias{{0b01111111, "utf32_latvian_ci"}}, isdefault: 0b00000000},
- 163: {alias: []collalias{{0b01111111, "utf32_romanian_ci"}}, isdefault: 0b00000000},
- 164: {alias: []collalias{{0b01111111, "utf32_slovenian_ci"}}, isdefault: 0b00000000},
- 165: {alias: []collalias{{0b01111111, "utf32_polish_ci"}}, isdefault: 0b00000000},
- 166: {alias: []collalias{{0b01111111, "utf32_estonian_ci"}}, isdefault: 0b00000000},
- 167: {alias: []collalias{{0b01111111, "utf32_spanish_ci"}}, isdefault: 0b00000000},
- 168: {alias: []collalias{{0b01111111, "utf32_swedish_ci"}}, isdefault: 0b00000000},
- 169: {alias: []collalias{{0b01111111, "utf32_turkish_ci"}}, isdefault: 0b00000000},
- 170: {alias: []collalias{{0b01111111, "utf32_czech_ci"}}, isdefault: 0b00000000},
- 171: {alias: []collalias{{0b01111111, "utf32_danish_ci"}}, isdefault: 0b00000000},
- 172: {alias: []collalias{{0b01111111, "utf32_lithuanian_ci"}}, isdefault: 0b00000000},
- 173: {alias: []collalias{{0b01111111, "utf32_slovak_ci"}}, isdefault: 0b00000000},
- 174: {alias: []collalias{{0b01111111, "utf32_spanish2_ci"}}, isdefault: 0b00000000},
- 175: {alias: []collalias{{0b01111111, "utf32_roman_ci"}}, isdefault: 0b00000000},
- 176: {alias: []collalias{{0b01111111, "utf32_persian_ci"}}, isdefault: 0b00000000},
- 177: {alias: []collalias{{0b01111111, "utf32_esperanto_ci"}}, isdefault: 0b00000000},
- 178: {alias: []collalias{{0b01111111, "utf32_hungarian_ci"}}, isdefault: 0b00000000},
- 179: {alias: []collalias{{0b01111111, "utf32_sinhala_ci"}}, isdefault: 0b00000000},
- 180: {alias: []collalias{{0b01111111, "utf32_german2_ci"}}, isdefault: 0b00000000},
- 181: {alias: []collalias{{0b01110000, "utf32_croatian_ci"}, {0b00001111, "utf32_croatian_mysql561_ci"}}, isdefault: 0b00000000},
- 182: {alias: []collalias{{0b01111111, "utf32_unicode_520_ci"}}, isdefault: 0b00000000},
- 183: {alias: []collalias{{0b01111111, "utf32_vietnamese_ci"}}, isdefault: 0b00000000},
- 192: {alias: []collalias{{0b01111111, "utf8_unicode_ci"}, {0b01111111, "utf8mb3_unicode_ci"}}, isdefault: 0b00000000},
- 193: {alias: []collalias{{0b01111111, "utf8_icelandic_ci"}, {0b01111111, "utf8mb3_icelandic_ci"}}, isdefault: 0b00000000},
- 194: {alias: []collalias{{0b01111111, "utf8_latvian_ci"}, {0b01111111, "utf8mb3_latvian_ci"}}, isdefault: 0b00000000},
- 195: {alias: []collalias{{0b01111111, "utf8_romanian_ci"}, {0b01111111, "utf8mb3_romanian_ci"}}, isdefault: 0b00000000},
- 196: {alias: []collalias{{0b01111111, "utf8_slovenian_ci"}, {0b01111111, "utf8mb3_slovenian_ci"}}, isdefault: 0b00000000},
- 197: {alias: []collalias{{0b01111111, "utf8_polish_ci"}, {0b01111111, "utf8mb3_polish_ci"}}, isdefault: 0b00000000},
- 198: {alias: []collalias{{0b01111111, "utf8_estonian_ci"}, {0b01111111, "utf8mb3_estonian_ci"}}, isdefault: 0b00000000},
- 199: {alias: []collalias{{0b01111111, "utf8_spanish_ci"}, {0b01111111, "utf8mb3_spanish_ci"}}, isdefault: 0b00000000},
- 200: {alias: []collalias{{0b01111111, "utf8_swedish_ci"}, {0b01111111, "utf8mb3_swedish_ci"}}, isdefault: 0b00000000},
- 201: {alias: []collalias{{0b01111111, "utf8_turkish_ci"}, {0b01111111, "utf8mb3_turkish_ci"}}, isdefault: 0b00000000},
- 202: {alias: []collalias{{0b01111111, "utf8_czech_ci"}, {0b01111111, "utf8mb3_czech_ci"}}, isdefault: 0b00000000},
- 203: {alias: []collalias{{0b01111111, "utf8_danish_ci"}, {0b01111111, "utf8mb3_danish_ci"}}, isdefault: 0b00000000},
- 204: {alias: []collalias{{0b01111111, "utf8_lithuanian_ci"}, {0b01111111, "utf8mb3_lithuanian_ci"}}, isdefault: 0b00000000},
- 205: {alias: []collalias{{0b01111111, "utf8_slovak_ci"}, {0b01111111, "utf8mb3_slovak_ci"}}, isdefault: 0b00000000},
- 206: {alias: []collalias{{0b01111111, "utf8_spanish2_ci"}, {0b01111111, "utf8mb3_spanish2_ci"}}, isdefault: 0b00000000},
- 207: {alias: []collalias{{0b01111111, "utf8_roman_ci"}, {0b01111111, "utf8mb3_roman_ci"}}, isdefault: 0b00000000},
- 208: {alias: []collalias{{0b01111111, "utf8_persian_ci"}, {0b01111111, "utf8mb3_persian_ci"}}, isdefault: 0b00000000},
- 209: {alias: []collalias{{0b01111111, "utf8_esperanto_ci"}, {0b01111111, "utf8mb3_esperanto_ci"}}, isdefault: 0b00000000},
- 210: {alias: []collalias{{0b01111111, "utf8_hungarian_ci"}, {0b01111111, "utf8mb3_hungarian_ci"}}, isdefault: 0b00000000},
- 211: {alias: []collalias{{0b01111111, "utf8_sinhala_ci"}, {0b01111111, "utf8mb3_sinhala_ci"}}, isdefault: 0b00000000},
- 212: {alias: []collalias{{0b01111111, "utf8_german2_ci"}, {0b01111111, "utf8mb3_german2_ci"}}, isdefault: 0b00000000},
- 213: {alias: []collalias{{0b01110000, "utf8_croatian_ci"}, {0b00001111, "utf8_croatian_mysql561_ci"}, {0b01110000, "utf8mb3_croatian_ci"}, {0b00001111, "utf8mb3_croatian_mysql561_ci"}}, isdefault: 0b00000000},
- 214: {alias: []collalias{{0b01111111, "utf8_unicode_520_ci"}, {0b01111111, "utf8mb3_unicode_520_ci"}}, isdefault: 0b00000000},
- 215: {alias: []collalias{{0b01111111, "utf8_vietnamese_ci"}, {0b01111111, "utf8mb3_vietnamese_ci"}}, isdefault: 0b00000000},
- 223: {alias: []collalias{{0b01111111, "utf8_general_mysql500_ci"}, {0b01111111, "utf8mb3_general_mysql500_ci"}}, isdefault: 0b00000000},
- 224: {alias: []collalias{{0b01111111, "utf8mb4_unicode_ci"}}, isdefault: 0b00000000},
- 225: {alias: []collalias{{0b01111111, "utf8mb4_icelandic_ci"}}, isdefault: 0b00000000},
- 226: {alias: []collalias{{0b01111111, "utf8mb4_latvian_ci"}}, isdefault: 0b00000000},
- 227: {alias: []collalias{{0b01111111, "utf8mb4_romanian_ci"}}, isdefault: 0b00000000},
- 228: {alias: []collalias{{0b01111111, "utf8mb4_slovenian_ci"}}, isdefault: 0b00000000},
- 229: {alias: []collalias{{0b01111111, "utf8mb4_polish_ci"}}, isdefault: 0b00000000},
- 230: {alias: []collalias{{0b01111111, "utf8mb4_estonian_ci"}}, isdefault: 0b00000000},
- 231: {alias: []collalias{{0b01111111, "utf8mb4_spanish_ci"}}, isdefault: 0b00000000},
- 232: {alias: []collalias{{0b01111111, "utf8mb4_swedish_ci"}}, isdefault: 0b00000000},
- 233: {alias: []collalias{{0b01111111, "utf8mb4_turkish_ci"}}, isdefault: 0b00000000},
- 234: {alias: []collalias{{0b01111111, "utf8mb4_czech_ci"}}, isdefault: 0b00000000},
- 235: {alias: []collalias{{0b01111111, "utf8mb4_danish_ci"}}, isdefault: 0b00000000},
- 236: {alias: []collalias{{0b01111111, "utf8mb4_lithuanian_ci"}}, isdefault: 0b00000000},
- 237: {alias: []collalias{{0b01111111, "utf8mb4_slovak_ci"}}, isdefault: 0b00000000},
- 238: {alias: []collalias{{0b01111111, "utf8mb4_spanish2_ci"}}, isdefault: 0b00000000},
- 239: {alias: []collalias{{0b01111111, "utf8mb4_roman_ci"}}, isdefault: 0b00000000},
- 240: {alias: []collalias{{0b01111111, "utf8mb4_persian_ci"}}, isdefault: 0b00000000},
- 241: {alias: []collalias{{0b01111111, "utf8mb4_esperanto_ci"}}, isdefault: 0b00000000},
- 242: {alias: []collalias{{0b01111111, "utf8mb4_hungarian_ci"}}, isdefault: 0b00000000},
- 243: {alias: []collalias{{0b01111111, "utf8mb4_sinhala_ci"}}, isdefault: 0b00000000},
- 244: {alias: []collalias{{0b01111111, "utf8mb4_german2_ci"}}, isdefault: 0b00000000},
- 245: {alias: []collalias{{0b01110000, "utf8mb4_croatian_ci"}, {0b00001111, "utf8mb4_croatian_mysql561_ci"}}, isdefault: 0b00000000},
- 246: {alias: []collalias{{0b01111111, "utf8mb4_unicode_520_ci"}}, isdefault: 0b00000000},
- 247: {alias: []collalias{{0b01111111, "utf8mb4_vietnamese_ci"}}, isdefault: 0b00000000},
- 248: {alias: []collalias{{0b01100000, "gb18030_chinese_ci"}}, isdefault: 0b01100000},
- 249: {alias: []collalias{{0b01100000, "gb18030_bin"}}, isdefault: 0b00000000},
- 250: {alias: []collalias{{0b01100000, "gb18030_unicode_520_ci"}}, isdefault: 0b00000000},
- 255: {alias: []collalias{{0b01000000, "utf8mb4_0900_ai_ci"}}, isdefault: 0b01000000},
- 256: {alias: []collalias{{0b01000000, "utf8mb4_de_pb_0900_ai_ci"}}, isdefault: 0b00000000},
- 257: {alias: []collalias{{0b01000000, "utf8mb4_is_0900_ai_ci"}}, isdefault: 0b00000000},
- 258: {alias: []collalias{{0b01000000, "utf8mb4_lv_0900_ai_ci"}}, isdefault: 0b00000000},
- 259: {alias: []collalias{{0b01000000, "utf8mb4_ro_0900_ai_ci"}}, isdefault: 0b00000000},
- 260: {alias: []collalias{{0b01000000, "utf8mb4_sl_0900_ai_ci"}}, isdefault: 0b00000000},
- 261: {alias: []collalias{{0b01000000, "utf8mb4_pl_0900_ai_ci"}}, isdefault: 0b00000000},
- 262: {alias: []collalias{{0b01000000, "utf8mb4_et_0900_ai_ci"}}, isdefault: 0b00000000},
- 263: {alias: []collalias{{0b01000000, "utf8mb4_es_0900_ai_ci"}}, isdefault: 0b00000000},
- 264: {alias: []collalias{{0b01000000, "utf8mb4_sv_0900_ai_ci"}}, isdefault: 0b00000000},
- 265: {alias: []collalias{{0b01000000, "utf8mb4_tr_0900_ai_ci"}}, isdefault: 0b00000000},
- 266: {alias: []collalias{{0b01000000, "utf8mb4_cs_0900_ai_ci"}}, isdefault: 0b00000000},
- 267: {alias: []collalias{{0b01000000, "utf8mb4_da_0900_ai_ci"}}, isdefault: 0b00000000},
- 268: {alias: []collalias{{0b01000000, "utf8mb4_lt_0900_ai_ci"}}, isdefault: 0b00000000},
- 269: {alias: []collalias{{0b01000000, "utf8mb4_sk_0900_ai_ci"}}, isdefault: 0b00000000},
- 270: {alias: []collalias{{0b01000000, "utf8mb4_es_trad_0900_ai_ci"}}, isdefault: 0b00000000},
- 271: {alias: []collalias{{0b01000000, "utf8mb4_la_0900_ai_ci"}}, isdefault: 0b00000000},
- 273: {alias: []collalias{{0b01000000, "utf8mb4_eo_0900_ai_ci"}}, isdefault: 0b00000000},
- 274: {alias: []collalias{{0b01000000, "utf8mb4_hu_0900_ai_ci"}}, isdefault: 0b00000000},
- 275: {alias: []collalias{{0b01000000, "utf8mb4_hr_0900_ai_ci"}}, isdefault: 0b00000000},
- 277: {alias: []collalias{{0b01000000, "utf8mb4_vi_0900_ai_ci"}}, isdefault: 0b00000000},
- 278: {alias: []collalias{{0b01000000, "utf8mb4_0900_as_cs"}}, isdefault: 0b00000000},
- 279: {alias: []collalias{{0b01000000, "utf8mb4_de_pb_0900_as_cs"}}, isdefault: 0b00000000},
- 280: {alias: []collalias{{0b01000000, "utf8mb4_is_0900_as_cs"}}, isdefault: 0b00000000},
- 281: {alias: []collalias{{0b01000000, "utf8mb4_lv_0900_as_cs"}}, isdefault: 0b00000000},
- 282: {alias: []collalias{{0b01000000, "utf8mb4_ro_0900_as_cs"}}, isdefault: 0b00000000},
- 283: {alias: []collalias{{0b01000000, "utf8mb4_sl_0900_as_cs"}}, isdefault: 0b00000000},
- 284: {alias: []collalias{{0b01000000, "utf8mb4_pl_0900_as_cs"}}, isdefault: 0b00000000},
- 285: {alias: []collalias{{0b01000000, "utf8mb4_et_0900_as_cs"}}, isdefault: 0b00000000},
- 286: {alias: []collalias{{0b01000000, "utf8mb4_es_0900_as_cs"}}, isdefault: 0b00000000},
- 287: {alias: []collalias{{0b01000000, "utf8mb4_sv_0900_as_cs"}}, isdefault: 0b00000000},
- 288: {alias: []collalias{{0b01000000, "utf8mb4_tr_0900_as_cs"}}, isdefault: 0b00000000},
- 289: {alias: []collalias{{0b01000000, "utf8mb4_cs_0900_as_cs"}}, isdefault: 0b00000000},
- 290: {alias: []collalias{{0b01000000, "utf8mb4_da_0900_as_cs"}}, isdefault: 0b00000000},
- 291: {alias: []collalias{{0b01000000, "utf8mb4_lt_0900_as_cs"}}, isdefault: 0b00000000},
- 292: {alias: []collalias{{0b01000000, "utf8mb4_sk_0900_as_cs"}}, isdefault: 0b00000000},
- 293: {alias: []collalias{{0b01000000, "utf8mb4_es_trad_0900_as_cs"}}, isdefault: 0b00000000},
- 294: {alias: []collalias{{0b01000000, "utf8mb4_la_0900_as_cs"}}, isdefault: 0b00000000},
- 296: {alias: []collalias{{0b01000000, "utf8mb4_eo_0900_as_cs"}}, isdefault: 0b00000000},
- 297: {alias: []collalias{{0b01000000, "utf8mb4_hu_0900_as_cs"}}, isdefault: 0b00000000},
- 298: {alias: []collalias{{0b01000000, "utf8mb4_hr_0900_as_cs"}}, isdefault: 0b00000000},
- 300: {alias: []collalias{{0b01000000, "utf8mb4_vi_0900_as_cs"}}, isdefault: 0b00000000},
- 303: {alias: []collalias{{0b01000000, "utf8mb4_ja_0900_as_cs"}}, isdefault: 0b00000000},
- 304: {alias: []collalias{{0b01000000, "utf8mb4_ja_0900_as_cs_ks"}}, isdefault: 0b00000000},
- 305: {alias: []collalias{{0b01000000, "utf8mb4_0900_as_ci"}}, isdefault: 0b00000000},
- 306: {alias: []collalias{{0b01000000, "utf8mb4_ru_0900_ai_ci"}}, isdefault: 0b00000000},
- 307: {alias: []collalias{{0b01000000, "utf8mb4_ru_0900_as_cs"}}, isdefault: 0b00000000},
- 308: {alias: []collalias{{0b01000000, "utf8mb4_zh_0900_as_cs"}}, isdefault: 0b00000000},
- 309: {alias: []collalias{{0b01000000, "utf8mb4_0900_bin"}}, isdefault: 0b00000000},
- 310: {alias: []collalias{{0b01000000, "utf8mb4_nb_0900_ai_ci"}}, isdefault: 0b00000000},
- 311: {alias: []collalias{{0b01000000, "utf8mb4_nb_0900_as_cs"}}, isdefault: 0b00000000},
- 312: {alias: []collalias{{0b01000000, "utf8mb4_nn_0900_ai_ci"}}, isdefault: 0b00000000},
- 313: {alias: []collalias{{0b01000000, "utf8mb4_nn_0900_as_cs"}}, isdefault: 0b00000000},
- 314: {alias: []collalias{{0b01000000, "utf8mb4_sr_latn_0900_ai_ci"}}, isdefault: 0b00000000},
- 315: {alias: []collalias{{0b01000000, "utf8mb4_sr_latn_0900_as_cs"}}, isdefault: 0b00000000},
- 316: {alias: []collalias{{0b01000000, "utf8mb4_bs_0900_ai_ci"}}, isdefault: 0b00000000},
- 317: {alias: []collalias{{0b01000000, "utf8mb4_bs_0900_as_cs"}}, isdefault: 0b00000000},
- 318: {alias: []collalias{{0b01000000, "utf8mb4_bg_0900_ai_ci"}}, isdefault: 0b00000000},
- 319: {alias: []collalias{{0b01000000, "utf8mb4_bg_0900_as_cs"}}, isdefault: 0b00000000},
- 320: {alias: []collalias{{0b01000000, "utf8mb4_gl_0900_ai_ci"}}, isdefault: 0b00000000},
- 321: {alias: []collalias{{0b01000000, "utf8mb4_gl_0900_as_cs"}}, isdefault: 0b00000000},
- 322: {alias: []collalias{{0b01000000, "utf8mb4_mn_cyrl_0900_ai_ci"}}, isdefault: 0b00000000},
- 323: {alias: []collalias{{0b01000000, "utf8mb4_mn_cyrl_0900_as_cs"}}, isdefault: 0b00000000},
- 576: {alias: []collalias{{0b00001111, "utf8_croatian_ci"}, {0b00001111, "utf8mb3_croatian_ci"}}, isdefault: 0b00000000},
- 577: {alias: []collalias{{0b00001111, "utf8_myanmar_ci"}, {0b00001111, "utf8mb3_myanmar_ci"}}, isdefault: 0b00000000},
- 578: {alias: []collalias{{0b00001110, "utf8_thai_520_w2"}, {0b00001110, "utf8mb3_thai_520_w2"}}, isdefault: 0b00000000},
- 608: {alias: []collalias{{0b00001111, "utf8mb4_croatian_ci"}}, isdefault: 0b00000000},
- 609: {alias: []collalias{{0b00001111, "utf8mb4_myanmar_ci"}}, isdefault: 0b00000000},
- 610: {alias: []collalias{{0b00001110, "utf8mb4_thai_520_w2"}}, isdefault: 0b00000000},
- 640: {alias: []collalias{{0b00001111, "ucs2_croatian_ci"}}, isdefault: 0b00000000},
- 641: {alias: []collalias{{0b00001111, "ucs2_myanmar_ci"}}, isdefault: 0b00000000},
- 642: {alias: []collalias{{0b00001110, "ucs2_thai_520_w2"}}, isdefault: 0b00000000},
- 672: {alias: []collalias{{0b00001111, "utf16_croatian_ci"}}, isdefault: 0b00000000},
- 673: {alias: []collalias{{0b00001111, "utf16_myanmar_ci"}}, isdefault: 0b00000000},
- 674: {alias: []collalias{{0b00001110, "utf16_thai_520_w2"}}, isdefault: 0b00000000},
- 736: {alias: []collalias{{0b00001111, "utf32_croatian_ci"}}, isdefault: 0b00000000},
- 737: {alias: []collalias{{0b00001111, "utf32_myanmar_ci"}}, isdefault: 0b00000000},
- 738: {alias: []collalias{{0b00001110, "utf32_thai_520_w2"}}, isdefault: 0b00000000},
- 1025: {alias: []collalias{{0b00001100, "big5_chinese_nopad_ci"}}, isdefault: 0b00000000},
- 1027: {alias: []collalias{{0b00001100, "dec8_swedish_nopad_ci"}}, isdefault: 0b00000000},
- 1028: {alias: []collalias{{0b00001100, "cp850_general_nopad_ci"}}, isdefault: 0b00000000},
- 1030: {alias: []collalias{{0b00001100, "hp8_english_nopad_ci"}}, isdefault: 0b00000000},
- 1031: {alias: []collalias{{0b00001100, "koi8r_general_nopad_ci"}}, isdefault: 0b00000000},
- 1032: {alias: []collalias{{0b00001100, "latin1_swedish_nopad_ci"}}, isdefault: 0b00000000},
- 1033: {alias: []collalias{{0b00001100, "latin2_general_nopad_ci"}}, isdefault: 0b00000000},
- 1034: {alias: []collalias{{0b00001100, "swe7_swedish_nopad_ci"}}, isdefault: 0b00000000},
- 1035: {alias: []collalias{{0b00001100, "ascii_general_nopad_ci"}}, isdefault: 0b00000000},
- 1036: {alias: []collalias{{0b00001100, "ujis_japanese_nopad_ci"}}, isdefault: 0b00000000},
- 1037: {alias: []collalias{{0b00001100, "sjis_japanese_nopad_ci"}}, isdefault: 0b00000000},
- 1040: {alias: []collalias{{0b00001100, "hebrew_general_nopad_ci"}}, isdefault: 0b00000000},
- 1042: {alias: []collalias{{0b00001100, "tis620_thai_nopad_ci"}}, isdefault: 0b00000000},
- 1043: {alias: []collalias{{0b00001100, "euckr_korean_nopad_ci"}}, isdefault: 0b00000000},
- 1046: {alias: []collalias{{0b00001100, "koi8u_general_nopad_ci"}}, isdefault: 0b00000000},
- 1048: {alias: []collalias{{0b00001100, "gb2312_chinese_nopad_ci"}}, isdefault: 0b00000000},
- 1049: {alias: []collalias{{0b00001100, "greek_general_nopad_ci"}}, isdefault: 0b00000000},
- 1050: {alias: []collalias{{0b00001100, "cp1250_general_nopad_ci"}}, isdefault: 0b00000000},
- 1052: {alias: []collalias{{0b00001100, "gbk_chinese_nopad_ci"}}, isdefault: 0b00000000},
- 1054: {alias: []collalias{{0b00001100, "latin5_turkish_nopad_ci"}}, isdefault: 0b00000000},
- 1056: {alias: []collalias{{0b00001100, "armscii8_general_nopad_ci"}}, isdefault: 0b00000000},
- 1057: {alias: []collalias{{0b00001100, "utf8_general_nopad_ci"}, {0b00001100, "utf8mb3_general_nopad_ci"}}, isdefault: 0b00000000},
- 1059: {alias: []collalias{{0b00001100, "ucs2_general_nopad_ci"}}, isdefault: 0b00000000},
- 1060: {alias: []collalias{{0b00001100, "cp866_general_nopad_ci"}}, isdefault: 0b00000000},
- 1061: {alias: []collalias{{0b00001100, "keybcs2_general_nopad_ci"}}, isdefault: 0b00000000},
- 1062: {alias: []collalias{{0b00001100, "macce_general_nopad_ci"}}, isdefault: 0b00000000},
- 1063: {alias: []collalias{{0b00001100, "macroman_general_nopad_ci"}}, isdefault: 0b00000000},
- 1064: {alias: []collalias{{0b00001100, "cp852_general_nopad_ci"}}, isdefault: 0b00000000},
- 1065: {alias: []collalias{{0b00001100, "latin7_general_nopad_ci"}}, isdefault: 0b00000000},
- 1067: {alias: []collalias{{0b00001100, "macce_nopad_bin"}}, isdefault: 0b00000000},
- 1069: {alias: []collalias{{0b00001100, "utf8mb4_general_nopad_ci"}}, isdefault: 0b00000000},
- 1070: {alias: []collalias{{0b00001100, "utf8mb4_nopad_bin"}}, isdefault: 0b00000000},
- 1071: {alias: []collalias{{0b00001100, "latin1_nopad_bin"}}, isdefault: 0b00000000},
- 1074: {alias: []collalias{{0b00001100, "cp1251_nopad_bin"}}, isdefault: 0b00000000},
- 1075: {alias: []collalias{{0b00001100, "cp1251_general_nopad_ci"}}, isdefault: 0b00000000},
- 1077: {alias: []collalias{{0b00001100, "macroman_nopad_bin"}}, isdefault: 0b00000000},
- 1078: {alias: []collalias{{0b00001100, "utf16_general_nopad_ci"}}, isdefault: 0b00000000},
- 1079: {alias: []collalias{{0b00001100, "utf16_nopad_bin"}}, isdefault: 0b00000000},
- 1080: {alias: []collalias{{0b00001100, "utf16le_general_nopad_ci"}}, isdefault: 0b00000000},
- 1081: {alias: []collalias{{0b00001100, "cp1256_general_nopad_ci"}}, isdefault: 0b00000000},
- 1082: {alias: []collalias{{0b00001100, "cp1257_nopad_bin"}}, isdefault: 0b00000000},
- 1083: {alias: []collalias{{0b00001100, "cp1257_general_nopad_ci"}}, isdefault: 0b00000000},
- 1084: {alias: []collalias{{0b00001100, "utf32_general_nopad_ci"}}, isdefault: 0b00000000},
- 1085: {alias: []collalias{{0b00001100, "utf32_nopad_bin"}}, isdefault: 0b00000000},
- 1086: {alias: []collalias{{0b00001100, "utf16le_nopad_bin"}}, isdefault: 0b00000000},
- 1088: {alias: []collalias{{0b00001100, "armscii8_nopad_bin"}}, isdefault: 0b00000000},
- 1089: {alias: []collalias{{0b00001100, "ascii_nopad_bin"}}, isdefault: 0b00000000},
- 1090: {alias: []collalias{{0b00001100, "cp1250_nopad_bin"}}, isdefault: 0b00000000},
- 1091: {alias: []collalias{{0b00001100, "cp1256_nopad_bin"}}, isdefault: 0b00000000},
- 1092: {alias: []collalias{{0b00001100, "cp866_nopad_bin"}}, isdefault: 0b00000000},
- 1093: {alias: []collalias{{0b00001100, "dec8_nopad_bin"}}, isdefault: 0b00000000},
- 1094: {alias: []collalias{{0b00001100, "greek_nopad_bin"}}, isdefault: 0b00000000},
- 1095: {alias: []collalias{{0b00001100, "hebrew_nopad_bin"}}, isdefault: 0b00000000},
- 1096: {alias: []collalias{{0b00001100, "hp8_nopad_bin"}}, isdefault: 0b00000000},
- 1097: {alias: []collalias{{0b00001100, "keybcs2_nopad_bin"}}, isdefault: 0b00000000},
- 1098: {alias: []collalias{{0b00001100, "koi8r_nopad_bin"}}, isdefault: 0b00000000},
- 1099: {alias: []collalias{{0b00001100, "koi8u_nopad_bin"}}, isdefault: 0b00000000},
- 1101: {alias: []collalias{{0b00001100, "latin2_nopad_bin"}}, isdefault: 0b00000000},
- 1102: {alias: []collalias{{0b00001100, "latin5_nopad_bin"}}, isdefault: 0b00000000},
- 1103: {alias: []collalias{{0b00001100, "latin7_nopad_bin"}}, isdefault: 0b00000000},
- 1104: {alias: []collalias{{0b00001100, "cp850_nopad_bin"}}, isdefault: 0b00000000},
- 1105: {alias: []collalias{{0b00001100, "cp852_nopad_bin"}}, isdefault: 0b00000000},
- 1106: {alias: []collalias{{0b00001100, "swe7_nopad_bin"}}, isdefault: 0b00000000},
- 1107: {alias: []collalias{{0b00001100, "utf8_nopad_bin"}, {0b00001100, "utf8mb3_nopad_bin"}}, isdefault: 0b00000000},
- 1108: {alias: []collalias{{0b00001100, "big5_nopad_bin"}}, isdefault: 0b00000000},
- 1109: {alias: []collalias{{0b00001100, "euckr_nopad_bin"}}, isdefault: 0b00000000},
- 1110: {alias: []collalias{{0b00001100, "gb2312_nopad_bin"}}, isdefault: 0b00000000},
- 1111: {alias: []collalias{{0b00001100, "gbk_nopad_bin"}}, isdefault: 0b00000000},
- 1112: {alias: []collalias{{0b00001100, "sjis_nopad_bin"}}, isdefault: 0b00000000},
- 1113: {alias: []collalias{{0b00001100, "tis620_nopad_bin"}}, isdefault: 0b00000000},
- 1114: {alias: []collalias{{0b00001100, "ucs2_nopad_bin"}}, isdefault: 0b00000000},
- 1115: {alias: []collalias{{0b00001100, "ujis_nopad_bin"}}, isdefault: 0b00000000},
- 1116: {alias: []collalias{{0b00001100, "geostd8_general_nopad_ci"}}, isdefault: 0b00000000},
- 1117: {alias: []collalias{{0b00001100, "geostd8_nopad_bin"}}, isdefault: 0b00000000},
- 1119: {alias: []collalias{{0b00001100, "cp932_japanese_nopad_ci"}}, isdefault: 0b00000000},
- 1120: {alias: []collalias{{0b00001100, "cp932_nopad_bin"}}, isdefault: 0b00000000},
- 1121: {alias: []collalias{{0b00001100, "eucjpms_japanese_nopad_ci"}}, isdefault: 0b00000000},
- 1122: {alias: []collalias{{0b00001100, "eucjpms_nopad_bin"}}, isdefault: 0b00000000},
- 1125: {alias: []collalias{{0b00001100, "utf16_unicode_nopad_ci"}}, isdefault: 0b00000000},
- 1147: {alias: []collalias{{0b00001100, "utf16_unicode_520_nopad_ci"}}, isdefault: 0b00000000},
- 1152: {alias: []collalias{{0b00001100, "ucs2_unicode_nopad_ci"}}, isdefault: 0b00000000},
- 1174: {alias: []collalias{{0b00001100, "ucs2_unicode_520_nopad_ci"}}, isdefault: 0b00000000},
- 1184: {alias: []collalias{{0b00001100, "utf32_unicode_nopad_ci"}}, isdefault: 0b00000000},
- 1206: {alias: []collalias{{0b00001100, "utf32_unicode_520_nopad_ci"}}, isdefault: 0b00000000},
- 1216: {alias: []collalias{{0b00001100, "utf8_unicode_nopad_ci"}, {0b00001100, "utf8mb3_unicode_nopad_ci"}}, isdefault: 0b00000000},
- 1238: {alias: []collalias{{0b00001100, "utf8_unicode_520_nopad_ci"}, {0b00001100, "utf8mb3_unicode_520_nopad_ci"}}, isdefault: 0b00000000},
- 1248: {alias: []collalias{{0b00001100, "utf8mb4_unicode_nopad_ci"}}, isdefault: 0b00000000},
- 1270: {alias: []collalias{{0b00001100, "utf8mb4_unicode_520_nopad_ci"}}, isdefault: 0b00000000},
+ 1: {alias: []collalias{{0b01111111, "big5_chinese_ci", "big5"}}, isdefault: 0b01111111},
+ 2: {alias: []collalias{{0b01111111, "latin2_czech_cs", "latin2"}}, isdefault: 0b00000000},
+ 3: {alias: []collalias{{0b01111111, "dec8_swedish_ci", "dec8"}}, isdefault: 0b01111111},
+ 4: {alias: []collalias{{0b01111111, "cp850_general_ci", "cp850"}}, isdefault: 0b01111111},
+ 5: {alias: []collalias{{0b01111111, "latin1_german1_ci", "latin1"}}, isdefault: 0b00000000},
+ 6: {alias: []collalias{{0b01111111, "hp8_english_ci", "hp8"}}, isdefault: 0b01111111},
+ 7: {alias: []collalias{{0b01111111, "koi8r_general_ci", "koi8r"}}, isdefault: 0b01111111},
+ 8: {alias: []collalias{{0b01111111, "latin1_swedish_ci", "latin1"}}, isdefault: 0b01111111},
+ 9: {alias: []collalias{{0b01111111, "latin2_general_ci", "latin2"}}, isdefault: 0b01111111},
+ 10: {alias: []collalias{{0b01111111, "swe7_swedish_ci", "swe7"}}, isdefault: 0b01111111},
+ 11: {alias: []collalias{{0b01111111, "ascii_general_ci", "ascii"}}, isdefault: 0b01111111},
+ 12: {alias: []collalias{{0b01111111, "ujis_japanese_ci", "ujis"}}, isdefault: 0b01111111},
+ 13: {alias: []collalias{{0b01111111, "sjis_japanese_ci", "sjis"}}, isdefault: 0b01111111},
+ 14: {alias: []collalias{{0b01111111, "cp1251_bulgarian_ci", "cp1251"}}, isdefault: 0b00000000},
+ 15: {alias: []collalias{{0b01111111, "latin1_danish_ci", "latin1"}}, isdefault: 0b00000000},
+ 16: {alias: []collalias{{0b01111111, "hebrew_general_ci", "hebrew"}}, isdefault: 0b01111111},
+ 18: {alias: []collalias{{0b01111111, "tis620_thai_ci", "tis620"}}, isdefault: 0b01111111},
+ 19: {alias: []collalias{{0b01111111, "euckr_korean_ci", "euckr"}}, isdefault: 0b01111111},
+ 20: {alias: []collalias{{0b01111111, "latin7_estonian_cs", "latin7"}}, isdefault: 0b00000000},
+ 21: {alias: []collalias{{0b01111111, "latin2_hungarian_ci", "latin2"}}, isdefault: 0b00000000},
+ 22: {alias: []collalias{{0b01111111, "koi8u_general_ci", "koi8u"}}, isdefault: 0b01111111},
+ 23: {alias: []collalias{{0b01111111, "cp1251_ukrainian_ci", "cp1251"}}, isdefault: 0b00000000},
+ 24: {alias: []collalias{{0b01111111, "gb2312_chinese_ci", "gb2312"}}, isdefault: 0b01111111},
+ 25: {alias: []collalias{{0b01111111, "greek_general_ci", "greek"}}, isdefault: 0b01111111},
+ 26: {alias: []collalias{{0b01111111, "cp1250_general_ci", "cp1250"}}, isdefault: 0b01111111},
+ 27: {alias: []collalias{{0b01111111, "latin2_croatian_ci", "latin2"}}, isdefault: 0b00000000},
+ 28: {alias: []collalias{{0b01111111, "gbk_chinese_ci", "gbk"}}, isdefault: 0b01111111},
+ 29: {alias: []collalias{{0b01111111, "cp1257_lithuanian_ci", "cp1257"}}, isdefault: 0b00000000},
+ 30: {alias: []collalias{{0b01111111, "latin5_turkish_ci", "latin5"}}, isdefault: 0b01111111},
+ 31: {alias: []collalias{{0b01111111, "latin1_german2_ci", "latin1"}}, isdefault: 0b00000000},
+ 32: {alias: []collalias{{0b01111111, "armscii8_general_ci", "armscii8"}}, isdefault: 0b01111111},
+ 33: {alias: []collalias{{0b01111111, "utf8_general_ci", "utf8"}, {0b01111111, "utf8mb3_general_ci", "utf8mb3"}}, isdefault: 0b01111111},
+ 34: {alias: []collalias{{0b01111111, "cp1250_czech_cs", "cp1250"}}, isdefault: 0b00000000},
+ 35: {alias: []collalias{{0b01111111, "ucs2_general_ci", "ucs2"}}, isdefault: 0b01111111},
+ 36: {alias: []collalias{{0b01111111, "cp866_general_ci", "cp866"}}, isdefault: 0b01111111},
+ 37: {alias: []collalias{{0b01111111, "keybcs2_general_ci", "keybcs2"}}, isdefault: 0b01111111},
+ 38: {alias: []collalias{{0b01111111, "macce_general_ci", "macce"}}, isdefault: 0b01111111},
+ 39: {alias: []collalias{{0b01111111, "macroman_general_ci", "macroman"}}, isdefault: 0b01111111},
+ 40: {alias: []collalias{{0b01111111, "cp852_general_ci", "cp852"}}, isdefault: 0b01111111},
+ 41: {alias: []collalias{{0b01111111, "latin7_general_ci", "latin7"}}, isdefault: 0b01111111},
+ 42: {alias: []collalias{{0b01111111, "latin7_general_cs", "latin7"}}, isdefault: 0b00000000},
+ 43: {alias: []collalias{{0b01111111, "macce_bin", "macce"}}, isdefault: 0b00000000},
+ 44: {alias: []collalias{{0b01111111, "cp1250_croatian_ci", "cp1250"}}, isdefault: 0b00000000},
+ 45: {alias: []collalias{{0b01111111, "utf8mb4_general_ci", "utf8mb4"}}, isdefault: 0b00111111},
+ 46: {alias: []collalias{{0b01111111, "utf8mb4_bin", "utf8mb4"}}, isdefault: 0b00000000},
+ 47: {alias: []collalias{{0b01111111, "latin1_bin", "latin1"}}, isdefault: 0b00000000},
+ 48: {alias: []collalias{{0b01111111, "latin1_general_ci", "latin1"}}, isdefault: 0b00000000},
+ 49: {alias: []collalias{{0b01111111, "latin1_general_cs", "latin1"}}, isdefault: 0b00000000},
+ 50: {alias: []collalias{{0b01111111, "cp1251_bin", "cp1251"}}, isdefault: 0b00000000},
+ 51: {alias: []collalias{{0b01111111, "cp1251_general_ci", "cp1251"}}, isdefault: 0b01111111},
+ 52: {alias: []collalias{{0b01111111, "cp1251_general_cs", "cp1251"}}, isdefault: 0b00000000},
+ 53: {alias: []collalias{{0b01111111, "macroman_bin", "macroman"}}, isdefault: 0b00000000},
+ 54: {alias: []collalias{{0b01111111, "utf16_general_ci", "utf16"}}, isdefault: 0b01111111},
+ 55: {alias: []collalias{{0b01111111, "utf16_bin", "utf16"}}, isdefault: 0b00000000},
+ 56: {alias: []collalias{{0b01111111, "utf16le_general_ci", "utf16le"}}, isdefault: 0b01111111},
+ 57: {alias: []collalias{{0b01111111, "cp1256_general_ci", "cp1256"}}, isdefault: 0b01111111},
+ 58: {alias: []collalias{{0b01111111, "cp1257_bin", "cp1257"}}, isdefault: 0b00000000},
+ 59: {alias: []collalias{{0b01111111, "cp1257_general_ci", "cp1257"}}, isdefault: 0b01111111},
+ 60: {alias: []collalias{{0b01111111, "utf32_general_ci", "utf32"}}, isdefault: 0b01111111},
+ 61: {alias: []collalias{{0b01111111, "utf32_bin", "utf32"}}, isdefault: 0b00000000},
+ 62: {alias: []collalias{{0b01111111, "utf16le_bin", "utf16le"}}, isdefault: 0b00000000},
+ 63: {alias: []collalias{{0b01111111, "binary", "binary"}}, isdefault: 0b01111111},
+ 64: {alias: []collalias{{0b01111111, "armscii8_bin", "armscii8"}}, isdefault: 0b00000000},
+ 65: {alias: []collalias{{0b01111111, "ascii_bin", "ascii"}}, isdefault: 0b00000000},
+ 66: {alias: []collalias{{0b01111111, "cp1250_bin", "cp1250"}}, isdefault: 0b00000000},
+ 67: {alias: []collalias{{0b01111111, "cp1256_bin", "cp1256"}}, isdefault: 0b00000000},
+ 68: {alias: []collalias{{0b01111111, "cp866_bin", "cp866"}}, isdefault: 0b00000000},
+ 69: {alias: []collalias{{0b01111111, "dec8_bin", "dec8"}}, isdefault: 0b00000000},
+ 70: {alias: []collalias{{0b01111111, "greek_bin", "greek"}}, isdefault: 0b00000000},
+ 71: {alias: []collalias{{0b01111111, "hebrew_bin", "hebrew"}}, isdefault: 0b00000000},
+ 72: {alias: []collalias{{0b01111111, "hp8_bin", "hp8"}}, isdefault: 0b00000000},
+ 73: {alias: []collalias{{0b01111111, "keybcs2_bin", "keybcs2"}}, isdefault: 0b00000000},
+ 74: {alias: []collalias{{0b01111111, "koi8r_bin", "koi8r"}}, isdefault: 0b00000000},
+ 75: {alias: []collalias{{0b01111111, "koi8u_bin", "koi8u"}}, isdefault: 0b00000000},
+ 76: {alias: []collalias{{0b01000000, "utf8_tolower_ci", "utf8"}, {0b01000000, "utf8mb3_tolower_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 77: {alias: []collalias{{0b01111111, "latin2_bin", "latin2"}}, isdefault: 0b00000000},
+ 78: {alias: []collalias{{0b01111111, "latin5_bin", "latin5"}}, isdefault: 0b00000000},
+ 79: {alias: []collalias{{0b01111111, "latin7_bin", "latin7"}}, isdefault: 0b00000000},
+ 80: {alias: []collalias{{0b01111111, "cp850_bin", "cp850"}}, isdefault: 0b00000000},
+ 81: {alias: []collalias{{0b01111111, "cp852_bin", "cp852"}}, isdefault: 0b00000000},
+ 82: {alias: []collalias{{0b01111111, "swe7_bin", "swe7"}}, isdefault: 0b00000000},
+ 83: {alias: []collalias{{0b01111111, "utf8_bin", "utf8"}, {0b01111111, "utf8mb3_bin", "utf8mb3"}}, isdefault: 0b00000000},
+ 84: {alias: []collalias{{0b01111111, "big5_bin", "big5"}}, isdefault: 0b00000000},
+ 85: {alias: []collalias{{0b01111111, "euckr_bin", "euckr"}}, isdefault: 0b00000000},
+ 86: {alias: []collalias{{0b01111111, "gb2312_bin", "gb2312"}}, isdefault: 0b00000000},
+ 87: {alias: []collalias{{0b01111111, "gbk_bin", "gbk"}}, isdefault: 0b00000000},
+ 88: {alias: []collalias{{0b01111111, "sjis_bin", "sjis"}}, isdefault: 0b00000000},
+ 89: {alias: []collalias{{0b01111111, "tis620_bin", "tis620"}}, isdefault: 0b00000000},
+ 90: {alias: []collalias{{0b01111111, "ucs2_bin", "ucs2"}}, isdefault: 0b00000000},
+ 91: {alias: []collalias{{0b01111111, "ujis_bin", "ujis"}}, isdefault: 0b00000000},
+ 92: {alias: []collalias{{0b01111111, "geostd8_general_ci", "geostd8"}}, isdefault: 0b01111111},
+ 93: {alias: []collalias{{0b01111111, "geostd8_bin", "geostd8"}}, isdefault: 0b00000000},
+ 94: {alias: []collalias{{0b01111111, "latin1_spanish_ci", "latin1"}}, isdefault: 0b00000000},
+ 95: {alias: []collalias{{0b01111111, "cp932_japanese_ci", "cp932"}}, isdefault: 0b01111111},
+ 96: {alias: []collalias{{0b01111111, "cp932_bin", "cp932"}}, isdefault: 0b00000000},
+ 97: {alias: []collalias{{0b01111111, "eucjpms_japanese_ci", "eucjpms"}}, isdefault: 0b01111111},
+ 98: {alias: []collalias{{0b01111111, "eucjpms_bin", "eucjpms"}}, isdefault: 0b00000000},
+ 99: {alias: []collalias{{0b01111111, "cp1250_polish_ci", "cp1250"}}, isdefault: 0b00000000},
+ 101: {alias: []collalias{{0b01111111, "utf16_unicode_ci", "utf16"}}, isdefault: 0b00000000},
+ 102: {alias: []collalias{{0b01111111, "utf16_icelandic_ci", "utf16"}}, isdefault: 0b00000000},
+ 103: {alias: []collalias{{0b01111111, "utf16_latvian_ci", "utf16"}}, isdefault: 0b00000000},
+ 104: {alias: []collalias{{0b01111111, "utf16_romanian_ci", "utf16"}}, isdefault: 0b00000000},
+ 105: {alias: []collalias{{0b01111111, "utf16_slovenian_ci", "utf16"}}, isdefault: 0b00000000},
+ 106: {alias: []collalias{{0b01111111, "utf16_polish_ci", "utf16"}}, isdefault: 0b00000000},
+ 107: {alias: []collalias{{0b01111111, "utf16_estonian_ci", "utf16"}}, isdefault: 0b00000000},
+ 108: {alias: []collalias{{0b01111111, "utf16_spanish_ci", "utf16"}}, isdefault: 0b00000000},
+ 109: {alias: []collalias{{0b01111111, "utf16_swedish_ci", "utf16"}}, isdefault: 0b00000000},
+ 110: {alias: []collalias{{0b01111111, "utf16_turkish_ci", "utf16"}}, isdefault: 0b00000000},
+ 111: {alias: []collalias{{0b01111111, "utf16_czech_ci", "utf16"}}, isdefault: 0b00000000},
+ 112: {alias: []collalias{{0b01111111, "utf16_danish_ci", "utf16"}}, isdefault: 0b00000000},
+ 113: {alias: []collalias{{0b01111111, "utf16_lithuanian_ci", "utf16"}}, isdefault: 0b00000000},
+ 114: {alias: []collalias{{0b01111111, "utf16_slovak_ci", "utf16"}}, isdefault: 0b00000000},
+ 115: {alias: []collalias{{0b01111111, "utf16_spanish2_ci", "utf16"}}, isdefault: 0b00000000},
+ 116: {alias: []collalias{{0b01111111, "utf16_roman_ci", "utf16"}}, isdefault: 0b00000000},
+ 117: {alias: []collalias{{0b01111111, "utf16_persian_ci", "utf16"}}, isdefault: 0b00000000},
+ 118: {alias: []collalias{{0b01111111, "utf16_esperanto_ci", "utf16"}}, isdefault: 0b00000000},
+ 119: {alias: []collalias{{0b01111111, "utf16_hungarian_ci", "utf16"}}, isdefault: 0b00000000},
+ 120: {alias: []collalias{{0b01111111, "utf16_sinhala_ci", "utf16"}}, isdefault: 0b00000000},
+ 121: {alias: []collalias{{0b01111111, "utf16_german2_ci", "utf16"}}, isdefault: 0b00000000},
+ 122: {alias: []collalias{{0b01110000, "utf16_croatian_ci", "utf16"}, {0b00001111, "utf16_croatian_mysql561_ci", "utf16"}}, isdefault: 0b00000000},
+ 123: {alias: []collalias{{0b01111111, "utf16_unicode_520_ci", "utf16"}}, isdefault: 0b00000000},
+ 124: {alias: []collalias{{0b01111111, "utf16_vietnamese_ci", "utf16"}}, isdefault: 0b00000000},
+ 128: {alias: []collalias{{0b01111111, "ucs2_unicode_ci", "ucs2"}}, isdefault: 0b00000000},
+ 129: {alias: []collalias{{0b01111111, "ucs2_icelandic_ci", "ucs2"}}, isdefault: 0b00000000},
+ 130: {alias: []collalias{{0b01111111, "ucs2_latvian_ci", "ucs2"}}, isdefault: 0b00000000},
+ 131: {alias: []collalias{{0b01111111, "ucs2_romanian_ci", "ucs2"}}, isdefault: 0b00000000},
+ 132: {alias: []collalias{{0b01111111, "ucs2_slovenian_ci", "ucs2"}}, isdefault: 0b00000000},
+ 133: {alias: []collalias{{0b01111111, "ucs2_polish_ci", "ucs2"}}, isdefault: 0b00000000},
+ 134: {alias: []collalias{{0b01111111, "ucs2_estonian_ci", "ucs2"}}, isdefault: 0b00000000},
+ 135: {alias: []collalias{{0b01111111, "ucs2_spanish_ci", "ucs2"}}, isdefault: 0b00000000},
+ 136: {alias: []collalias{{0b01111111, "ucs2_swedish_ci", "ucs2"}}, isdefault: 0b00000000},
+ 137: {alias: []collalias{{0b01111111, "ucs2_turkish_ci", "ucs2"}}, isdefault: 0b00000000},
+ 138: {alias: []collalias{{0b01111111, "ucs2_czech_ci", "ucs2"}}, isdefault: 0b00000000},
+ 139: {alias: []collalias{{0b01111111, "ucs2_danish_ci", "ucs2"}}, isdefault: 0b00000000},
+ 140: {alias: []collalias{{0b01111111, "ucs2_lithuanian_ci", "ucs2"}}, isdefault: 0b00000000},
+ 141: {alias: []collalias{{0b01111111, "ucs2_slovak_ci", "ucs2"}}, isdefault: 0b00000000},
+ 142: {alias: []collalias{{0b01111111, "ucs2_spanish2_ci", "ucs2"}}, isdefault: 0b00000000},
+ 143: {alias: []collalias{{0b01111111, "ucs2_roman_ci", "ucs2"}}, isdefault: 0b00000000},
+ 144: {alias: []collalias{{0b01111111, "ucs2_persian_ci", "ucs2"}}, isdefault: 0b00000000},
+ 145: {alias: []collalias{{0b01111111, "ucs2_esperanto_ci", "ucs2"}}, isdefault: 0b00000000},
+ 146: {alias: []collalias{{0b01111111, "ucs2_hungarian_ci", "ucs2"}}, isdefault: 0b00000000},
+ 147: {alias: []collalias{{0b01111111, "ucs2_sinhala_ci", "ucs2"}}, isdefault: 0b00000000},
+ 148: {alias: []collalias{{0b01111111, "ucs2_german2_ci", "ucs2"}}, isdefault: 0b00000000},
+ 149: {alias: []collalias{{0b01110000, "ucs2_croatian_ci", "ucs2"}, {0b00001111, "ucs2_croatian_mysql561_ci", "ucs2"}}, isdefault: 0b00000000},
+ 150: {alias: []collalias{{0b01111111, "ucs2_unicode_520_ci", "ucs2"}}, isdefault: 0b00000000},
+ 151: {alias: []collalias{{0b01111111, "ucs2_vietnamese_ci", "ucs2"}}, isdefault: 0b00000000},
+ 159: {alias: []collalias{{0b01111111, "ucs2_general_mysql500_ci", "ucs2"}}, isdefault: 0b00000000},
+ 160: {alias: []collalias{{0b01111111, "utf32_unicode_ci", "utf32"}}, isdefault: 0b00000000},
+ 161: {alias: []collalias{{0b01111111, "utf32_icelandic_ci", "utf32"}}, isdefault: 0b00000000},
+ 162: {alias: []collalias{{0b01111111, "utf32_latvian_ci", "utf32"}}, isdefault: 0b00000000},
+ 163: {alias: []collalias{{0b01111111, "utf32_romanian_ci", "utf32"}}, isdefault: 0b00000000},
+ 164: {alias: []collalias{{0b01111111, "utf32_slovenian_ci", "utf32"}}, isdefault: 0b00000000},
+ 165: {alias: []collalias{{0b01111111, "utf32_polish_ci", "utf32"}}, isdefault: 0b00000000},
+ 166: {alias: []collalias{{0b01111111, "utf32_estonian_ci", "utf32"}}, isdefault: 0b00000000},
+ 167: {alias: []collalias{{0b01111111, "utf32_spanish_ci", "utf32"}}, isdefault: 0b00000000},
+ 168: {alias: []collalias{{0b01111111, "utf32_swedish_ci", "utf32"}}, isdefault: 0b00000000},
+ 169: {alias: []collalias{{0b01111111, "utf32_turkish_ci", "utf32"}}, isdefault: 0b00000000},
+ 170: {alias: []collalias{{0b01111111, "utf32_czech_ci", "utf32"}}, isdefault: 0b00000000},
+ 171: {alias: []collalias{{0b01111111, "utf32_danish_ci", "utf32"}}, isdefault: 0b00000000},
+ 172: {alias: []collalias{{0b01111111, "utf32_lithuanian_ci", "utf32"}}, isdefault: 0b00000000},
+ 173: {alias: []collalias{{0b01111111, "utf32_slovak_ci", "utf32"}}, isdefault: 0b00000000},
+ 174: {alias: []collalias{{0b01111111, "utf32_spanish2_ci", "utf32"}}, isdefault: 0b00000000},
+ 175: {alias: []collalias{{0b01111111, "utf32_roman_ci", "utf32"}}, isdefault: 0b00000000},
+ 176: {alias: []collalias{{0b01111111, "utf32_persian_ci", "utf32"}}, isdefault: 0b00000000},
+ 177: {alias: []collalias{{0b01111111, "utf32_esperanto_ci", "utf32"}}, isdefault: 0b00000000},
+ 178: {alias: []collalias{{0b01111111, "utf32_hungarian_ci", "utf32"}}, isdefault: 0b00000000},
+ 179: {alias: []collalias{{0b01111111, "utf32_sinhala_ci", "utf32"}}, isdefault: 0b00000000},
+ 180: {alias: []collalias{{0b01111111, "utf32_german2_ci", "utf32"}}, isdefault: 0b00000000},
+ 181: {alias: []collalias{{0b01110000, "utf32_croatian_ci", "utf32"}, {0b00001111, "utf32_croatian_mysql561_ci", "utf32"}}, isdefault: 0b00000000},
+ 182: {alias: []collalias{{0b01111111, "utf32_unicode_520_ci", "utf32"}}, isdefault: 0b00000000},
+ 183: {alias: []collalias{{0b01111111, "utf32_vietnamese_ci", "utf32"}}, isdefault: 0b00000000},
+ 192: {alias: []collalias{{0b01111111, "utf8_unicode_ci", "utf8"}, {0b01111111, "utf8mb3_unicode_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 193: {alias: []collalias{{0b01111111, "utf8_icelandic_ci", "utf8"}, {0b01111111, "utf8mb3_icelandic_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 194: {alias: []collalias{{0b01111111, "utf8_latvian_ci", "utf8"}, {0b01111111, "utf8mb3_latvian_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 195: {alias: []collalias{{0b01111111, "utf8_romanian_ci", "utf8"}, {0b01111111, "utf8mb3_romanian_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 196: {alias: []collalias{{0b01111111, "utf8_slovenian_ci", "utf8"}, {0b01111111, "utf8mb3_slovenian_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 197: {alias: []collalias{{0b01111111, "utf8_polish_ci", "utf8"}, {0b01111111, "utf8mb3_polish_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 198: {alias: []collalias{{0b01111111, "utf8_estonian_ci", "utf8"}, {0b01111111, "utf8mb3_estonian_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 199: {alias: []collalias{{0b01111111, "utf8_spanish_ci", "utf8"}, {0b01111111, "utf8mb3_spanish_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 200: {alias: []collalias{{0b01111111, "utf8_swedish_ci", "utf8"}, {0b01111111, "utf8mb3_swedish_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 201: {alias: []collalias{{0b01111111, "utf8_turkish_ci", "utf8"}, {0b01111111, "utf8mb3_turkish_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 202: {alias: []collalias{{0b01111111, "utf8_czech_ci", "utf8"}, {0b01111111, "utf8mb3_czech_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 203: {alias: []collalias{{0b01111111, "utf8_danish_ci", "utf8"}, {0b01111111, "utf8mb3_danish_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 204: {alias: []collalias{{0b01111111, "utf8_lithuanian_ci", "utf8"}, {0b01111111, "utf8mb3_lithuanian_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 205: {alias: []collalias{{0b01111111, "utf8_slovak_ci", "utf8"}, {0b01111111, "utf8mb3_slovak_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 206: {alias: []collalias{{0b01111111, "utf8_spanish2_ci", "utf8"}, {0b01111111, "utf8mb3_spanish2_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 207: {alias: []collalias{{0b01111111, "utf8_roman_ci", "utf8"}, {0b01111111, "utf8mb3_roman_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 208: {alias: []collalias{{0b01111111, "utf8_persian_ci", "utf8"}, {0b01111111, "utf8mb3_persian_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 209: {alias: []collalias{{0b01111111, "utf8_esperanto_ci", "utf8"}, {0b01111111, "utf8mb3_esperanto_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 210: {alias: []collalias{{0b01111111, "utf8_hungarian_ci", "utf8"}, {0b01111111, "utf8mb3_hungarian_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 211: {alias: []collalias{{0b01111111, "utf8_sinhala_ci", "utf8"}, {0b01111111, "utf8mb3_sinhala_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 212: {alias: []collalias{{0b01111111, "utf8_german2_ci", "utf8"}, {0b01111111, "utf8mb3_german2_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 213: {alias: []collalias{{0b01110000, "utf8_croatian_ci", "utf8"}, {0b00001111, "utf8_croatian_mysql561_ci", "utf8"}, {0b01110000, "utf8mb3_croatian_ci", "utf8mb3"}, {0b00001111, "utf8mb3_croatian_mysql561_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 214: {alias: []collalias{{0b01111111, "utf8_unicode_520_ci", "utf8"}, {0b01111111, "utf8mb3_unicode_520_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 215: {alias: []collalias{{0b01111111, "utf8_vietnamese_ci", "utf8"}, {0b01111111, "utf8mb3_vietnamese_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 223: {alias: []collalias{{0b01111111, "utf8_general_mysql500_ci", "utf8"}, {0b01111111, "utf8mb3_general_mysql500_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 224: {alias: []collalias{{0b01111111, "utf8mb4_unicode_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 225: {alias: []collalias{{0b01111111, "utf8mb4_icelandic_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 226: {alias: []collalias{{0b01111111, "utf8mb4_latvian_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 227: {alias: []collalias{{0b01111111, "utf8mb4_romanian_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 228: {alias: []collalias{{0b01111111, "utf8mb4_slovenian_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 229: {alias: []collalias{{0b01111111, "utf8mb4_polish_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 230: {alias: []collalias{{0b01111111, "utf8mb4_estonian_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 231: {alias: []collalias{{0b01111111, "utf8mb4_spanish_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 232: {alias: []collalias{{0b01111111, "utf8mb4_swedish_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 233: {alias: []collalias{{0b01111111, "utf8mb4_turkish_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 234: {alias: []collalias{{0b01111111, "utf8mb4_czech_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 235: {alias: []collalias{{0b01111111, "utf8mb4_danish_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 236: {alias: []collalias{{0b01111111, "utf8mb4_lithuanian_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 237: {alias: []collalias{{0b01111111, "utf8mb4_slovak_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 238: {alias: []collalias{{0b01111111, "utf8mb4_spanish2_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 239: {alias: []collalias{{0b01111111, "utf8mb4_roman_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 240: {alias: []collalias{{0b01111111, "utf8mb4_persian_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 241: {alias: []collalias{{0b01111111, "utf8mb4_esperanto_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 242: {alias: []collalias{{0b01111111, "utf8mb4_hungarian_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 243: {alias: []collalias{{0b01111111, "utf8mb4_sinhala_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 244: {alias: []collalias{{0b01111111, "utf8mb4_german2_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 245: {alias: []collalias{{0b01110000, "utf8mb4_croatian_ci", "utf8mb4"}, {0b00001111, "utf8mb4_croatian_mysql561_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 246: {alias: []collalias{{0b01111111, "utf8mb4_unicode_520_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 247: {alias: []collalias{{0b01111111, "utf8mb4_vietnamese_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 248: {alias: []collalias{{0b01100000, "gb18030_chinese_ci", "gb18030"}}, isdefault: 0b01100000},
+ 249: {alias: []collalias{{0b01100000, "gb18030_bin", "gb18030"}}, isdefault: 0b00000000},
+ 250: {alias: []collalias{{0b01100000, "gb18030_unicode_520_ci", "gb18030"}}, isdefault: 0b00000000},
+ 255: {alias: []collalias{{0b01000000, "utf8mb4_0900_ai_ci", "utf8mb4"}}, isdefault: 0b01000000},
+ 256: {alias: []collalias{{0b01000000, "utf8mb4_de_pb_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 257: {alias: []collalias{{0b01000000, "utf8mb4_is_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 258: {alias: []collalias{{0b01000000, "utf8mb4_lv_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 259: {alias: []collalias{{0b01000000, "utf8mb4_ro_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 260: {alias: []collalias{{0b01000000, "utf8mb4_sl_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 261: {alias: []collalias{{0b01000000, "utf8mb4_pl_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 262: {alias: []collalias{{0b01000000, "utf8mb4_et_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 263: {alias: []collalias{{0b01000000, "utf8mb4_es_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 264: {alias: []collalias{{0b01000000, "utf8mb4_sv_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 265: {alias: []collalias{{0b01000000, "utf8mb4_tr_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 266: {alias: []collalias{{0b01000000, "utf8mb4_cs_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 267: {alias: []collalias{{0b01000000, "utf8mb4_da_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 268: {alias: []collalias{{0b01000000, "utf8mb4_lt_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 269: {alias: []collalias{{0b01000000, "utf8mb4_sk_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 270: {alias: []collalias{{0b01000000, "utf8mb4_es_trad_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 271: {alias: []collalias{{0b01000000, "utf8mb4_la_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 273: {alias: []collalias{{0b01000000, "utf8mb4_eo_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 274: {alias: []collalias{{0b01000000, "utf8mb4_hu_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 275: {alias: []collalias{{0b01000000, "utf8mb4_hr_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 277: {alias: []collalias{{0b01000000, "utf8mb4_vi_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 278: {alias: []collalias{{0b01000000, "utf8mb4_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 279: {alias: []collalias{{0b01000000, "utf8mb4_de_pb_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 280: {alias: []collalias{{0b01000000, "utf8mb4_is_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 281: {alias: []collalias{{0b01000000, "utf8mb4_lv_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 282: {alias: []collalias{{0b01000000, "utf8mb4_ro_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 283: {alias: []collalias{{0b01000000, "utf8mb4_sl_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 284: {alias: []collalias{{0b01000000, "utf8mb4_pl_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 285: {alias: []collalias{{0b01000000, "utf8mb4_et_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 286: {alias: []collalias{{0b01000000, "utf8mb4_es_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 287: {alias: []collalias{{0b01000000, "utf8mb4_sv_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 288: {alias: []collalias{{0b01000000, "utf8mb4_tr_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 289: {alias: []collalias{{0b01000000, "utf8mb4_cs_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 290: {alias: []collalias{{0b01000000, "utf8mb4_da_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 291: {alias: []collalias{{0b01000000, "utf8mb4_lt_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 292: {alias: []collalias{{0b01000000, "utf8mb4_sk_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 293: {alias: []collalias{{0b01000000, "utf8mb4_es_trad_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 294: {alias: []collalias{{0b01000000, "utf8mb4_la_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 296: {alias: []collalias{{0b01000000, "utf8mb4_eo_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 297: {alias: []collalias{{0b01000000, "utf8mb4_hu_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 298: {alias: []collalias{{0b01000000, "utf8mb4_hr_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 300: {alias: []collalias{{0b01000000, "utf8mb4_vi_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 303: {alias: []collalias{{0b01000000, "utf8mb4_ja_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 304: {alias: []collalias{{0b01000000, "utf8mb4_ja_0900_as_cs_ks", "utf8mb4"}}, isdefault: 0b00000000},
+ 305: {alias: []collalias{{0b01000000, "utf8mb4_0900_as_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 306: {alias: []collalias{{0b01000000, "utf8mb4_ru_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 307: {alias: []collalias{{0b01000000, "utf8mb4_ru_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 308: {alias: []collalias{{0b01000000, "utf8mb4_zh_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 309: {alias: []collalias{{0b01000000, "utf8mb4_0900_bin", "utf8mb4"}}, isdefault: 0b00000000},
+ 310: {alias: []collalias{{0b01000000, "utf8mb4_nb_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 311: {alias: []collalias{{0b01000000, "utf8mb4_nb_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 312: {alias: []collalias{{0b01000000, "utf8mb4_nn_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 313: {alias: []collalias{{0b01000000, "utf8mb4_nn_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 314: {alias: []collalias{{0b01000000, "utf8mb4_sr_latn_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 315: {alias: []collalias{{0b01000000, "utf8mb4_sr_latn_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 316: {alias: []collalias{{0b01000000, "utf8mb4_bs_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 317: {alias: []collalias{{0b01000000, "utf8mb4_bs_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 318: {alias: []collalias{{0b01000000, "utf8mb4_bg_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 319: {alias: []collalias{{0b01000000, "utf8mb4_bg_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 320: {alias: []collalias{{0b01000000, "utf8mb4_gl_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 321: {alias: []collalias{{0b01000000, "utf8mb4_gl_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 322: {alias: []collalias{{0b01000000, "utf8mb4_mn_cyrl_0900_ai_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 323: {alias: []collalias{{0b01000000, "utf8mb4_mn_cyrl_0900_as_cs", "utf8mb4"}}, isdefault: 0b00000000},
+ 576: {alias: []collalias{{0b00001111, "utf8_croatian_ci", "utf8"}, {0b00001111, "utf8mb3_croatian_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 577: {alias: []collalias{{0b00001111, "utf8_myanmar_ci", "utf8"}, {0b00001111, "utf8mb3_myanmar_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 578: {alias: []collalias{{0b00001110, "utf8_thai_520_w2", "utf8"}, {0b00001110, "utf8mb3_thai_520_w2", "utf8mb3"}}, isdefault: 0b00000000},
+ 608: {alias: []collalias{{0b00001111, "utf8mb4_croatian_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 609: {alias: []collalias{{0b00001111, "utf8mb4_myanmar_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 610: {alias: []collalias{{0b00001110, "utf8mb4_thai_520_w2", "utf8mb4"}}, isdefault: 0b00000000},
+ 640: {alias: []collalias{{0b00001111, "ucs2_croatian_ci", "ucs2"}}, isdefault: 0b00000000},
+ 641: {alias: []collalias{{0b00001111, "ucs2_myanmar_ci", "ucs2"}}, isdefault: 0b00000000},
+ 642: {alias: []collalias{{0b00001110, "ucs2_thai_520_w2", "ucs2"}}, isdefault: 0b00000000},
+ 672: {alias: []collalias{{0b00001111, "utf16_croatian_ci", "utf16"}}, isdefault: 0b00000000},
+ 673: {alias: []collalias{{0b00001111, "utf16_myanmar_ci", "utf16"}}, isdefault: 0b00000000},
+ 674: {alias: []collalias{{0b00001110, "utf16_thai_520_w2", "utf16"}}, isdefault: 0b00000000},
+ 736: {alias: []collalias{{0b00001111, "utf32_croatian_ci", "utf32"}}, isdefault: 0b00000000},
+ 737: {alias: []collalias{{0b00001111, "utf32_myanmar_ci", "utf32"}}, isdefault: 0b00000000},
+ 738: {alias: []collalias{{0b00001110, "utf32_thai_520_w2", "utf32"}}, isdefault: 0b00000000},
+ 1025: {alias: []collalias{{0b00001100, "big5_chinese_nopad_ci", "big5"}}, isdefault: 0b00000000},
+ 1027: {alias: []collalias{{0b00001100, "dec8_swedish_nopad_ci", "dec8"}}, isdefault: 0b00000000},
+ 1028: {alias: []collalias{{0b00001100, "cp850_general_nopad_ci", "cp850"}}, isdefault: 0b00000000},
+ 1030: {alias: []collalias{{0b00001100, "hp8_english_nopad_ci", "hp8"}}, isdefault: 0b00000000},
+ 1031: {alias: []collalias{{0b00001100, "koi8r_general_nopad_ci", "koi8r"}}, isdefault: 0b00000000},
+ 1032: {alias: []collalias{{0b00001100, "latin1_swedish_nopad_ci", "latin1"}}, isdefault: 0b00000000},
+ 1033: {alias: []collalias{{0b00001100, "latin2_general_nopad_ci", "latin2"}}, isdefault: 0b00000000},
+ 1034: {alias: []collalias{{0b00001100, "swe7_swedish_nopad_ci", "swe7"}}, isdefault: 0b00000000},
+ 1035: {alias: []collalias{{0b00001100, "ascii_general_nopad_ci", "ascii"}}, isdefault: 0b00000000},
+ 1036: {alias: []collalias{{0b00001100, "ujis_japanese_nopad_ci", "ujis"}}, isdefault: 0b00000000},
+ 1037: {alias: []collalias{{0b00001100, "sjis_japanese_nopad_ci", "sjis"}}, isdefault: 0b00000000},
+ 1040: {alias: []collalias{{0b00001100, "hebrew_general_nopad_ci", "hebrew"}}, isdefault: 0b00000000},
+ 1042: {alias: []collalias{{0b00001100, "tis620_thai_nopad_ci", "tis620"}}, isdefault: 0b00000000},
+ 1043: {alias: []collalias{{0b00001100, "euckr_korean_nopad_ci", "euckr"}}, isdefault: 0b00000000},
+ 1046: {alias: []collalias{{0b00001100, "koi8u_general_nopad_ci", "koi8u"}}, isdefault: 0b00000000},
+ 1048: {alias: []collalias{{0b00001100, "gb2312_chinese_nopad_ci", "gb2312"}}, isdefault: 0b00000000},
+ 1049: {alias: []collalias{{0b00001100, "greek_general_nopad_ci", "greek"}}, isdefault: 0b00000000},
+ 1050: {alias: []collalias{{0b00001100, "cp1250_general_nopad_ci", "cp1250"}}, isdefault: 0b00000000},
+ 1052: {alias: []collalias{{0b00001100, "gbk_chinese_nopad_ci", "gbk"}}, isdefault: 0b00000000},
+ 1054: {alias: []collalias{{0b00001100, "latin5_turkish_nopad_ci", "latin5"}}, isdefault: 0b00000000},
+ 1056: {alias: []collalias{{0b00001100, "armscii8_general_nopad_ci", "armscii8"}}, isdefault: 0b00000000},
+ 1057: {alias: []collalias{{0b00001100, "utf8_general_nopad_ci", "utf8"}, {0b00001100, "utf8mb3_general_nopad_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 1059: {alias: []collalias{{0b00001100, "ucs2_general_nopad_ci", "ucs2"}}, isdefault: 0b00000000},
+ 1060: {alias: []collalias{{0b00001100, "cp866_general_nopad_ci", "cp866"}}, isdefault: 0b00000000},
+ 1061: {alias: []collalias{{0b00001100, "keybcs2_general_nopad_ci", "keybcs2"}}, isdefault: 0b00000000},
+ 1062: {alias: []collalias{{0b00001100, "macce_general_nopad_ci", "macce"}}, isdefault: 0b00000000},
+ 1063: {alias: []collalias{{0b00001100, "macroman_general_nopad_ci", "macroman"}}, isdefault: 0b00000000},
+ 1064: {alias: []collalias{{0b00001100, "cp852_general_nopad_ci", "cp852"}}, isdefault: 0b00000000},
+ 1065: {alias: []collalias{{0b00001100, "latin7_general_nopad_ci", "latin7"}}, isdefault: 0b00000000},
+ 1067: {alias: []collalias{{0b00001100, "macce_nopad_bin", "macce"}}, isdefault: 0b00000000},
+ 1069: {alias: []collalias{{0b00001100, "utf8mb4_general_nopad_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 1070: {alias: []collalias{{0b00001100, "utf8mb4_nopad_bin", "utf8mb4"}}, isdefault: 0b00000000},
+ 1071: {alias: []collalias{{0b00001100, "latin1_nopad_bin", "latin1"}}, isdefault: 0b00000000},
+ 1074: {alias: []collalias{{0b00001100, "cp1251_nopad_bin", "cp1251"}}, isdefault: 0b00000000},
+ 1075: {alias: []collalias{{0b00001100, "cp1251_general_nopad_ci", "cp1251"}}, isdefault: 0b00000000},
+ 1077: {alias: []collalias{{0b00001100, "macroman_nopad_bin", "macroman"}}, isdefault: 0b00000000},
+ 1078: {alias: []collalias{{0b00001100, "utf16_general_nopad_ci", "utf16"}}, isdefault: 0b00000000},
+ 1079: {alias: []collalias{{0b00001100, "utf16_nopad_bin", "utf16"}}, isdefault: 0b00000000},
+ 1080: {alias: []collalias{{0b00001100, "utf16le_general_nopad_ci", "utf16le"}}, isdefault: 0b00000000},
+ 1081: {alias: []collalias{{0b00001100, "cp1256_general_nopad_ci", "cp1256"}}, isdefault: 0b00000000},
+ 1082: {alias: []collalias{{0b00001100, "cp1257_nopad_bin", "cp1257"}}, isdefault: 0b00000000},
+ 1083: {alias: []collalias{{0b00001100, "cp1257_general_nopad_ci", "cp1257"}}, isdefault: 0b00000000},
+ 1084: {alias: []collalias{{0b00001100, "utf32_general_nopad_ci", "utf32"}}, isdefault: 0b00000000},
+ 1085: {alias: []collalias{{0b00001100, "utf32_nopad_bin", "utf32"}}, isdefault: 0b00000000},
+ 1086: {alias: []collalias{{0b00001100, "utf16le_nopad_bin", "utf16le"}}, isdefault: 0b00000000},
+ 1088: {alias: []collalias{{0b00001100, "armscii8_nopad_bin", "armscii8"}}, isdefault: 0b00000000},
+ 1089: {alias: []collalias{{0b00001100, "ascii_nopad_bin", "ascii"}}, isdefault: 0b00000000},
+ 1090: {alias: []collalias{{0b00001100, "cp1250_nopad_bin", "cp1250"}}, isdefault: 0b00000000},
+ 1091: {alias: []collalias{{0b00001100, "cp1256_nopad_bin", "cp1256"}}, isdefault: 0b00000000},
+ 1092: {alias: []collalias{{0b00001100, "cp866_nopad_bin", "cp866"}}, isdefault: 0b00000000},
+ 1093: {alias: []collalias{{0b00001100, "dec8_nopad_bin", "dec8"}}, isdefault: 0b00000000},
+ 1094: {alias: []collalias{{0b00001100, "greek_nopad_bin", "greek"}}, isdefault: 0b00000000},
+ 1095: {alias: []collalias{{0b00001100, "hebrew_nopad_bin", "hebrew"}}, isdefault: 0b00000000},
+ 1096: {alias: []collalias{{0b00001100, "hp8_nopad_bin", "hp8"}}, isdefault: 0b00000000},
+ 1097: {alias: []collalias{{0b00001100, "keybcs2_nopad_bin", "keybcs2"}}, isdefault: 0b00000000},
+ 1098: {alias: []collalias{{0b00001100, "koi8r_nopad_bin", "koi8r"}}, isdefault: 0b00000000},
+ 1099: {alias: []collalias{{0b00001100, "koi8u_nopad_bin", "koi8u"}}, isdefault: 0b00000000},
+ 1101: {alias: []collalias{{0b00001100, "latin2_nopad_bin", "latin2"}}, isdefault: 0b00000000},
+ 1102: {alias: []collalias{{0b00001100, "latin5_nopad_bin", "latin5"}}, isdefault: 0b00000000},
+ 1103: {alias: []collalias{{0b00001100, "latin7_nopad_bin", "latin7"}}, isdefault: 0b00000000},
+ 1104: {alias: []collalias{{0b00001100, "cp850_nopad_bin", "cp850"}}, isdefault: 0b00000000},
+ 1105: {alias: []collalias{{0b00001100, "cp852_nopad_bin", "cp852"}}, isdefault: 0b00000000},
+ 1106: {alias: []collalias{{0b00001100, "swe7_nopad_bin", "swe7"}}, isdefault: 0b00000000},
+ 1107: {alias: []collalias{{0b00001100, "utf8_nopad_bin", "utf8"}, {0b00001100, "utf8mb3_nopad_bin", "utf8mb3"}}, isdefault: 0b00000000},
+ 1108: {alias: []collalias{{0b00001100, "big5_nopad_bin", "big5"}}, isdefault: 0b00000000},
+ 1109: {alias: []collalias{{0b00001100, "euckr_nopad_bin", "euckr"}}, isdefault: 0b00000000},
+ 1110: {alias: []collalias{{0b00001100, "gb2312_nopad_bin", "gb2312"}}, isdefault: 0b00000000},
+ 1111: {alias: []collalias{{0b00001100, "gbk_nopad_bin", "gbk"}}, isdefault: 0b00000000},
+ 1112: {alias: []collalias{{0b00001100, "sjis_nopad_bin", "sjis"}}, isdefault: 0b00000000},
+ 1113: {alias: []collalias{{0b00001100, "tis620_nopad_bin", "tis620"}}, isdefault: 0b00000000},
+ 1114: {alias: []collalias{{0b00001100, "ucs2_nopad_bin", "ucs2"}}, isdefault: 0b00000000},
+ 1115: {alias: []collalias{{0b00001100, "ujis_nopad_bin", "ujis"}}, isdefault: 0b00000000},
+ 1116: {alias: []collalias{{0b00001100, "geostd8_general_nopad_ci", "geostd8"}}, isdefault: 0b00000000},
+ 1117: {alias: []collalias{{0b00001100, "geostd8_nopad_bin", "geostd8"}}, isdefault: 0b00000000},
+ 1119: {alias: []collalias{{0b00001100, "cp932_japanese_nopad_ci", "cp932"}}, isdefault: 0b00000000},
+ 1120: {alias: []collalias{{0b00001100, "cp932_nopad_bin", "cp932"}}, isdefault: 0b00000000},
+ 1121: {alias: []collalias{{0b00001100, "eucjpms_japanese_nopad_ci", "eucjpms"}}, isdefault: 0b00000000},
+ 1122: {alias: []collalias{{0b00001100, "eucjpms_nopad_bin", "eucjpms"}}, isdefault: 0b00000000},
+ 1125: {alias: []collalias{{0b00001100, "utf16_unicode_nopad_ci", "utf16"}}, isdefault: 0b00000000},
+ 1147: {alias: []collalias{{0b00001100, "utf16_unicode_520_nopad_ci", "utf16"}}, isdefault: 0b00000000},
+ 1152: {alias: []collalias{{0b00001100, "ucs2_unicode_nopad_ci", "ucs2"}}, isdefault: 0b00000000},
+ 1174: {alias: []collalias{{0b00001100, "ucs2_unicode_520_nopad_ci", "ucs2"}}, isdefault: 0b00000000},
+ 1184: {alias: []collalias{{0b00001100, "utf32_unicode_nopad_ci", "utf32"}}, isdefault: 0b00000000},
+ 1206: {alias: []collalias{{0b00001100, "utf32_unicode_520_nopad_ci", "utf32"}}, isdefault: 0b00000000},
+ 1216: {alias: []collalias{{0b00001100, "utf8_unicode_nopad_ci", "utf8"}, {0b00001100, "utf8mb3_unicode_nopad_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 1238: {alias: []collalias{{0b00001100, "utf8_unicode_520_nopad_ci", "utf8"}, {0b00001100, "utf8mb3_unicode_520_nopad_ci", "utf8mb3"}}, isdefault: 0b00000000},
+ 1248: {alias: []collalias{{0b00001100, "utf8mb4_unicode_nopad_ci", "utf8mb4"}}, isdefault: 0b00000000},
+ 1270: {alias: []collalias{{0b00001100, "utf8mb4_unicode_520_nopad_ci", "utf8mb4"}}, isdefault: 0b00000000},
}
diff --git a/go/mysql/collations/remote/collation.go b/go/mysql/collations/remote/collation.go
index 1e81c429794..dcc2acfee61 100644
--- a/go/mysql/collations/remote/collation.go
+++ b/go/mysql/collations/remote/collation.go
@@ -28,6 +28,7 @@ import (
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/collations/charset"
+ "vitess.io/vitess/go/mysql/collations/colldata"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/vthash"
)
@@ -53,22 +54,22 @@ type Collation struct {
err error
}
-var _ collations.Collation = (*Collation)(nil)
+var _ colldata.Collation = (*Collation)(nil)
func makeRemoteCollation(conn *mysql.Conn, collid collations.ID, collname string) *Collation {
- charset := collname
+ cs := collname
if idx := strings.IndexByte(collname, '_'); idx >= 0 {
- charset = collname[:idx]
+ cs = collname[:idx]
}
coll := &Collation{
name: collname,
id: collid,
conn: conn,
- charset: charset,
+ charset: cs,
}
- coll.prefix = fmt.Sprintf("_%s X'", charset)
+ coll.prefix = fmt.Sprintf("_%s X'", cs)
coll.suffix = fmt.Sprintf("' COLLATE %q", collname)
coll.hex = hex.NewEncoder(&coll.sql)
return coll
@@ -204,7 +205,7 @@ func (rp *remotePattern) Match(in []byte) bool {
return match
}
-func (c *Collation) Wildcard(pat []byte, _ rune, _ rune, escape rune) collations.WildcardPattern {
+func (c *Collation) Wildcard(pat []byte, _ rune, _ rune, escape rune) colldata.WildcardPattern {
return &remotePattern{
pattern: fmt.Sprintf("_%s X'%x'", c.charset, pat),
remote: c,
diff --git a/go/mysql/collations/supported.go b/go/mysql/collations/supported.go
new file mode 100644
index 00000000000..4404af2d4fb
--- /dev/null
+++ b/go/mysql/collations/supported.go
@@ -0,0 +1,294 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by makecolldata DO NOT EDIT
+
+package collations
+
+var supported = [...]string{
+ 0x3: "dec8_swedish_ci",
+ 0x4: "cp850_general_ci",
+ 0x5: "latin1_german1_ci",
+ 0x6: "hp8_english_ci",
+ 0x7: "koi8r_general_ci",
+ 0x8: "latin1_swedish_ci",
+ 0x9: "latin2_general_ci",
+ 0xa: "swe7_swedish_ci",
+ 0xb: "ascii_general_ci",
+ 0xc: "ujis_japanese_ci",
+ 0xd: "sjis_japanese_ci",
+ 0xe: "cp1251_bulgarian_ci",
+ 0xf: "latin1_danish_ci",
+ 0x10: "hebrew_general_ci",
+ 0x13: "euckr_korean_ci",
+ 0x14: "latin7_estonian_cs",
+ 0x15: "latin2_hungarian_ci",
+ 0x16: "koi8u_general_ci",
+ 0x17: "cp1251_ukrainian_ci",
+ 0x18: "gb2312_chinese_ci",
+ 0x19: "greek_general_ci",
+ 0x1a: "cp1250_general_ci",
+ 0x1b: "latin2_croatian_ci",
+ 0x1d: "cp1257_lithuanian_ci",
+ 0x1e: "latin5_turkish_ci",
+ 0x20: "armscii8_general_ci",
+ 0x21: "utf8mb3_general_ci",
+ 0x23: "ucs2_general_ci",
+ 0x24: "cp866_general_ci",
+ 0x25: "keybcs2_general_ci",
+ 0x26: "macce_general_ci",
+ 0x27: "macroman_general_ci",
+ 0x28: "cp852_general_ci",
+ 0x29: "latin7_general_ci",
+ 0x2a: "latin7_general_cs",
+ 0x2b: "macce_bin",
+ 0x2c: "cp1250_croatian_ci",
+ 0x2d: "utf8mb4_general_ci",
+ 0x2e: "utf8mb4_bin",
+ 0x2f: "latin1_bin",
+ 0x30: "latin1_general_ci",
+ 0x31: "latin1_general_cs",
+ 0x32: "cp1251_bin",
+ 0x33: "cp1251_general_ci",
+ 0x34: "cp1251_general_cs",
+ 0x35: "macroman_bin",
+ 0x36: "utf16_general_ci",
+ 0x37: "utf16_bin",
+ 0x38: "utf16le_general_ci",
+ 0x39: "cp1256_general_ci",
+ 0x3a: "cp1257_bin",
+ 0x3b: "cp1257_general_ci",
+ 0x3c: "utf32_general_ci",
+ 0x3d: "utf32_bin",
+ 0x3e: "utf16le_bin",
+ 0x3f: "binary",
+ 0x40: "armscii8_bin",
+ 0x41: "ascii_bin",
+ 0x42: "cp1250_bin",
+ 0x43: "cp1256_bin",
+ 0x44: "cp866_bin",
+ 0x45: "dec8_bin",
+ 0x46: "greek_bin",
+ 0x47: "hebrew_bin",
+ 0x48: "hp8_bin",
+ 0x49: "keybcs2_bin",
+ 0x4a: "koi8r_bin",
+ 0x4b: "koi8u_bin",
+ 0x4d: "latin2_bin",
+ 0x4e: "latin5_bin",
+ 0x4f: "latin7_bin",
+ 0x50: "cp850_bin",
+ 0x51: "cp852_bin",
+ 0x52: "swe7_bin",
+ 0x53: "utf8mb3_bin",
+ 0x55: "euckr_bin",
+ 0x56: "gb2312_bin",
+ 0x58: "sjis_bin",
+ 0x5a: "ucs2_bin",
+ 0x5b: "ujis_bin",
+ 0x5c: "geostd8_general_ci",
+ 0x5d: "geostd8_bin",
+ 0x5e: "latin1_spanish_ci",
+ 0x5f: "cp932_japanese_ci",
+ 0x60: "cp932_bin",
+ 0x61: "eucjpms_japanese_ci",
+ 0x62: "eucjpms_bin",
+ 0x63: "cp1250_polish_ci",
+ 0x65: "utf16_unicode_ci",
+ 0x66: "utf16_icelandic_ci",
+ 0x67: "utf16_latvian_ci",
+ 0x68: "utf16_romanian_ci",
+ 0x69: "utf16_slovenian_ci",
+ 0x6a: "utf16_polish_ci",
+ 0x6b: "utf16_estonian_ci",
+ 0x6c: "utf16_spanish_ci",
+ 0x6d: "utf16_swedish_ci",
+ 0x6e: "utf16_turkish_ci",
+ 0x6f: "utf16_czech_ci",
+ 0x70: "utf16_danish_ci",
+ 0x71: "utf16_lithuanian_ci",
+ 0x72: "utf16_slovak_ci",
+ 0x73: "utf16_spanish2_ci",
+ 0x74: "utf16_roman_ci",
+ 0x75: "utf16_persian_ci",
+ 0x76: "utf16_esperanto_ci",
+ 0x77: "utf16_hungarian_ci",
+ 0x78: "utf16_sinhala_ci",
+ 0x79: "utf16_german2_ci",
+ 0x7a: "utf16_croatian_ci",
+ 0x7b: "utf16_unicode_520_ci",
+ 0x7c: "utf16_vietnamese_ci",
+ 0x80: "ucs2_unicode_ci",
+ 0x81: "ucs2_icelandic_ci",
+ 0x82: "ucs2_latvian_ci",
+ 0x83: "ucs2_romanian_ci",
+ 0x84: "ucs2_slovenian_ci",
+ 0x85: "ucs2_polish_ci",
+ 0x86: "ucs2_estonian_ci",
+ 0x87: "ucs2_spanish_ci",
+ 0x88: "ucs2_swedish_ci",
+ 0x89: "ucs2_turkish_ci",
+ 0x8a: "ucs2_czech_ci",
+ 0x8b: "ucs2_danish_ci",
+ 0x8c: "ucs2_lithuanian_ci",
+ 0x8d: "ucs2_slovak_ci",
+ 0x8e: "ucs2_spanish2_ci",
+ 0x8f: "ucs2_roman_ci",
+ 0x90: "ucs2_persian_ci",
+ 0x91: "ucs2_esperanto_ci",
+ 0x92: "ucs2_hungarian_ci",
+ 0x93: "ucs2_sinhala_ci",
+ 0x94: "ucs2_german2_ci",
+ 0x95: "ucs2_croatian_ci",
+ 0x96: "ucs2_unicode_520_ci",
+ 0x97: "ucs2_vietnamese_ci",
+ 0xa0: "utf32_unicode_ci",
+ 0xa1: "utf32_icelandic_ci",
+ 0xa2: "utf32_latvian_ci",
+ 0xa3: "utf32_romanian_ci",
+ 0xa4: "utf32_slovenian_ci",
+ 0xa5: "utf32_polish_ci",
+ 0xa6: "utf32_estonian_ci",
+ 0xa7: "utf32_spanish_ci",
+ 0xa8: "utf32_swedish_ci",
+ 0xa9: "utf32_turkish_ci",
+ 0xaa: "utf32_czech_ci",
+ 0xab: "utf32_danish_ci",
+ 0xac: "utf32_lithuanian_ci",
+ 0xad: "utf32_slovak_ci",
+ 0xae: "utf32_spanish2_ci",
+ 0xaf: "utf32_roman_ci",
+ 0xb0: "utf32_persian_ci",
+ 0xb1: "utf32_esperanto_ci",
+ 0xb2: "utf32_hungarian_ci",
+ 0xb3: "utf32_sinhala_ci",
+ 0xb4: "utf32_german2_ci",
+ 0xb5: "utf32_croatian_ci",
+ 0xb6: "utf32_unicode_520_ci",
+ 0xb7: "utf32_vietnamese_ci",
+ 0xc0: "utf8mb3_unicode_ci",
+ 0xc1: "utf8mb3_icelandic_ci",
+ 0xc2: "utf8mb3_latvian_ci",
+ 0xc3: "utf8mb3_romanian_ci",
+ 0xc4: "utf8mb3_slovenian_ci",
+ 0xc5: "utf8mb3_polish_ci",
+ 0xc6: "utf8mb3_estonian_ci",
+ 0xc7: "utf8mb3_spanish_ci",
+ 0xc8: "utf8mb3_swedish_ci",
+ 0xc9: "utf8mb3_turkish_ci",
+ 0xca: "utf8mb3_czech_ci",
+ 0xcb: "utf8mb3_danish_ci",
+ 0xcc: "utf8mb3_lithuanian_ci",
+ 0xcd: "utf8mb3_slovak_ci",
+ 0xce: "utf8mb3_spanish2_ci",
+ 0xcf: "utf8mb3_roman_ci",
+ 0xd0: "utf8mb3_persian_ci",
+ 0xd1: "utf8mb3_esperanto_ci",
+ 0xd2: "utf8mb3_hungarian_ci",
+ 0xd3: "utf8mb3_sinhala_ci",
+ 0xd4: "utf8mb3_german2_ci",
+ 0xd5: "utf8mb3_croatian_ci",
+ 0xd6: "utf8mb3_unicode_520_ci",
+ 0xd7: "utf8mb3_vietnamese_ci",
+ 0xe0: "utf8mb4_unicode_ci",
+ 0xe1: "utf8mb4_icelandic_ci",
+ 0xe2: "utf8mb4_latvian_ci",
+ 0xe3: "utf8mb4_romanian_ci",
+ 0xe4: "utf8mb4_slovenian_ci",
+ 0xe5: "utf8mb4_polish_ci",
+ 0xe6: "utf8mb4_estonian_ci",
+ 0xe7: "utf8mb4_spanish_ci",
+ 0xe8: "utf8mb4_swedish_ci",
+ 0xe9: "utf8mb4_turkish_ci",
+ 0xea: "utf8mb4_czech_ci",
+ 0xeb: "utf8mb4_danish_ci",
+ 0xec: "utf8mb4_lithuanian_ci",
+ 0xed: "utf8mb4_slovak_ci",
+ 0xee: "utf8mb4_spanish2_ci",
+ 0xef: "utf8mb4_roman_ci",
+ 0xf0: "utf8mb4_persian_ci",
+ 0xf1: "utf8mb4_esperanto_ci",
+ 0xf2: "utf8mb4_hungarian_ci",
+ 0xf3: "utf8mb4_sinhala_ci",
+ 0xf4: "utf8mb4_german2_ci",
+ 0xf5: "utf8mb4_croatian_ci",
+ 0xf6: "utf8mb4_unicode_520_ci",
+ 0xf7: "utf8mb4_vietnamese_ci",
+ 0xfa: "gb18030_unicode_520_ci",
+ 0xff: "utf8mb4_0900_ai_ci",
+ 0x100: "utf8mb4_de_pb_0900_ai_ci",
+ 0x101: "utf8mb4_is_0900_ai_ci",
+ 0x102: "utf8mb4_lv_0900_ai_ci",
+ 0x103: "utf8mb4_ro_0900_ai_ci",
+ 0x104: "utf8mb4_sl_0900_ai_ci",
+ 0x105: "utf8mb4_pl_0900_ai_ci",
+ 0x106: "utf8mb4_et_0900_ai_ci",
+ 0x107: "utf8mb4_es_0900_ai_ci",
+ 0x108: "utf8mb4_sv_0900_ai_ci",
+ 0x109: "utf8mb4_tr_0900_ai_ci",
+ 0x10a: "utf8mb4_cs_0900_ai_ci",
+ 0x10b: "utf8mb4_da_0900_ai_ci",
+ 0x10c: "utf8mb4_lt_0900_ai_ci",
+ 0x10d: "utf8mb4_sk_0900_ai_ci",
+ 0x10e: "utf8mb4_es_trad_0900_ai_ci",
+ 0x10f: "utf8mb4_la_0900_ai_ci",
+ 0x111: "utf8mb4_eo_0900_ai_ci",
+ 0x112: "utf8mb4_hu_0900_ai_ci",
+ 0x113: "utf8mb4_hr_0900_ai_ci",
+ 0x115: "utf8mb4_vi_0900_ai_ci",
+ 0x116: "utf8mb4_0900_as_cs",
+ 0x117: "utf8mb4_de_pb_0900_as_cs",
+ 0x118: "utf8mb4_is_0900_as_cs",
+ 0x119: "utf8mb4_lv_0900_as_cs",
+ 0x11a: "utf8mb4_ro_0900_as_cs",
+ 0x11b: "utf8mb4_sl_0900_as_cs",
+ 0x11c: "utf8mb4_pl_0900_as_cs",
+ 0x11d: "utf8mb4_et_0900_as_cs",
+ 0x11e: "utf8mb4_es_0900_as_cs",
+ 0x11f: "utf8mb4_sv_0900_as_cs",
+ 0x120: "utf8mb4_tr_0900_as_cs",
+ 0x121: "utf8mb4_cs_0900_as_cs",
+ 0x122: "utf8mb4_da_0900_as_cs",
+ 0x123: "utf8mb4_lt_0900_as_cs",
+ 0x124: "utf8mb4_sk_0900_as_cs",
+ 0x125: "utf8mb4_es_trad_0900_as_cs",
+ 0x126: "utf8mb4_la_0900_as_cs",
+ 0x128: "utf8mb4_eo_0900_as_cs",
+ 0x129: "utf8mb4_hu_0900_as_cs",
+ 0x12a: "utf8mb4_hr_0900_as_cs",
+ 0x12c: "utf8mb4_vi_0900_as_cs",
+ 0x12f: "utf8mb4_ja_0900_as_cs",
+ 0x130: "utf8mb4_ja_0900_as_cs_ks",
+ 0x131: "utf8mb4_0900_as_ci",
+ 0x132: "utf8mb4_ru_0900_ai_ci",
+ 0x133: "utf8mb4_ru_0900_as_cs",
+ 0x134: "utf8mb4_zh_0900_as_cs",
+ 0x135: "utf8mb4_0900_bin",
+ 0x136: "utf8mb4_nb_0900_ai_ci",
+ 0x137: "utf8mb4_nb_0900_as_cs",
+ 0x138: "utf8mb4_nn_0900_ai_ci",
+ 0x139: "utf8mb4_nn_0900_as_cs",
+ 0x13a: "utf8mb4_sr_latn_0900_ai_ci",
+ 0x13b: "utf8mb4_sr_latn_0900_as_cs",
+ 0x13c: "utf8mb4_bs_0900_ai_ci",
+ 0x13d: "utf8mb4_bs_0900_as_cs",
+ 0x13e: "utf8mb4_bg_0900_ai_ci",
+ 0x13f: "utf8mb4_bg_0900_as_cs",
+ 0x140: "utf8mb4_gl_0900_ai_ci",
+ 0x141: "utf8mb4_gl_0900_as_cs",
+ 0x142: "utf8mb4_mn_cyrl_0900_ai_ci",
+ 0x143: "utf8mb4_mn_cyrl_0900_as_cs",
+}
diff --git a/go/mysql/collations/testdata/versions/collations_MySQL80.csv b/go/mysql/collations/testdata/versions/collations_MySQL8.csv
similarity index 100%
rename from go/mysql/collations/testdata/versions/collations_MySQL80.csv
rename to go/mysql/collations/testdata/versions/collations_MySQL8.csv
diff --git a/go/mysql/collations/tools/colldump/Dockerfile b/go/mysql/collations/tools/colldump/Dockerfile
new file mode 100644
index 00000000000..3e5acf4d9a6
--- /dev/null
+++ b/go/mysql/collations/tools/colldump/Dockerfile
@@ -0,0 +1,20 @@
+FROM debian:latest
+
+ARG MYSQL_VERSION=8.0.34
+
+RUN apt-get update && apt-get -y install curl cmake build-essential libssl-dev libncurses5-dev pkg-config rapidjson-dev
+
+RUN cd /tmp && \
+ curl -OL https://dev.mysql.com/get/Downloads/MySQL-8.0/mysql-${MYSQL_VERSION}.tar.gz && \
+ tar zxvf mysql-${MYSQL_VERSION}.tar.gz
+
+ADD colldump.cc /tmp/mysql-${MYSQL_VERSION}/strings/colldump.cc
+RUN echo "MYSQL_ADD_EXECUTABLE(colldump colldump.cc SKIP_INSTALL)\nTARGET_LINK_LIBRARIES(colldump strings)\n" >> /tmp/mysql-${MYSQL_VERSION}/strings/CMakeLists.txt
+
+RUN cd /tmp/mysql-${MYSQL_VERSION} && \
+ mkdir build && \
+ cd build && \
+ cmake -DDOWNLOAD_BOOST=1 -DWITH_BOOST=dist/boost .. && \
+ make colldump
+
+RUN mkdir /mysql-collations && /tmp/mysql-${MYSQL_VERSION}/build/runtime_output_directory/colldump /mysql-collations
diff --git a/go/mysql/collations/tools/colldump/colldump.cc b/go/mysql/collations/tools/colldump/colldump.cc
new file mode 100644
index 00000000000..7668ae1dc70
--- /dev/null
+++ b/go/mysql/collations/tools/colldump/colldump.cc
@@ -0,0 +1,418 @@
+/* Copyright (c) 2023, The Vitess Authors
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License, version 2.0,
+ as published by the Free Software Foundation.
+
+ This program is also distributed with certain software (including
+ but not limited to OpenSSL) that is licensed under separate terms,
+ as designated in a particular file or component or in included license
+ documentation. The authors of MySQL hereby grant you an additional
+ permission to link the program and your derivative works with the
+ separately licensed software that they have included with MySQL.
+
+ Without limiting anything contained in the foregoing, this file,
+ which is part of C Driver for MySQL (Connector/C), is also subject to the
+ Universal FOSS Exception, version 1.0, a copy of which can be found at
+ http://oss.oracle.com/licenses/universal-foss-exception.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License, version 2.0, for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "m_ctype.h"
+
+#ifdef HAVE_UNISTD_H
+#include
+#endif
+
+#include "my_sys.h"
+#include "my_config.h"
+#include "my_compiler.h"
+#include "my_inttypes.h"
+#include "my_io.h"
+#include "my_loglevel.h"
+#include "my_macros.h"
+#include "str_uca_type.h"
+
+#include "rapidjson/rapidjson.h"
+#include "rapidjson/filewritestream.h"
+#include "rapidjson/writer.h"
+
+template
+static void print_contractions_1(J &json, my_wc_t *path, size_t depth, bool contextual, const MY_CONTRACTION &contraction)
+{
+ path[depth] = contraction.ch;
+
+ if (contraction.is_contraction_tail)
+ {
+ json.StartObject();
+
+ json.Key("Path");
+ json.StartArray();
+ for (size_t i = 0; i <= depth; i++)
+ {
+ json.Uint((unsigned int)path[i]);
+ }
+ json.EndArray();
+
+ json.Key("Weights");
+ json.StartArray();
+ for (size_t i = 0; i < MY_UCA_MAX_WEIGHT_SIZE; i++)
+ {
+ json.Uint(contraction.weight[i]);
+ }
+ json.EndArray();
+
+ if (contextual)
+ {
+ json.Key("Contextual");
+ json.Bool(true);
+ }
+
+ json.EndObject();
+ }
+
+ for (const MY_CONTRACTION &ctr : contraction.child_nodes)
+ {
+ print_contractions_1(json, path, depth + 1, false, ctr);
+ }
+ for (const MY_CONTRACTION &ctr : contraction.child_nodes_context)
+ {
+ print_contractions_1(json, path, depth + 1, true, ctr);
+ }
+}
+
+template
+static void print_contractions(J &json, std::vector *contractions)
+{
+ my_wc_t path[256];
+ json.StartArray();
+ for (const MY_CONTRACTION &ctr : *contractions)
+ {
+ print_contractions_1(json, path, 0, false, ctr);
+ }
+ json.EndArray();
+}
+
+template
+static void print_reorder_params(J &json, struct Reorder_param *reorder)
+{
+ json.StartArray();
+ for (int i = 0; i < reorder->wt_rec_num; i++)
+ {
+ struct Reorder_wt_rec &r = reorder->wt_rec[i];
+ json.StartArray();
+ json.Uint(r.old_wt_bdy.begin);
+ json.Uint(r.old_wt_bdy.end);
+ json.Uint(r.new_wt_bdy.begin);
+ json.Uint(r.new_wt_bdy.end);
+ json.EndArray();
+ }
+ json.EndArray();
+}
+
+template
+static void print_unipages(J &json, const MY_UNI_IDX *unicodeidx)
+{
+ json.StartArray();
+ for (const MY_UNI_IDX *idx = unicodeidx; idx->tab != NULL; idx++)
+ {
+ json.StartObject();
+ json.Key("From");
+ json.Uint(idx->from);
+ json.Key("To");
+ json.Uint(idx->to);
+ json.Key("Tab");
+ json.StartArray();
+ const size_t entries = idx->to - idx->from;
+ for (size_t i = 0; i <= entries; i++)
+ {
+ json.Uint(idx->tab[i]);
+ }
+ json.EndArray();
+ json.EndObject();
+ }
+ json.EndArray();
+}
+
+template
+static void print_uca_weights_900(J &json, int codepoint, uint16 **weights)
+{
+ uint16 *page = weights[codepoint >> 8];
+ if (page == NULL)
+ return;
+
+ int offset = codepoint & 0xFF;
+ int cecount = page[offset];
+ char key[32];
+ snprintf(key, sizeof(key), "U+%04X", codepoint);
+
+ json.Key(key);
+ json.StartArray();
+ for (int ce = 0; ce < cecount; ce++)
+ {
+ json.Uint(page[256 + (ce * 3 + 0) * 256 + offset]);
+ json.Uint(page[256 + (ce * 3 + 1) * 256 + offset]);
+ json.Uint(page[256 + (ce * 3 + 2) * 256 + offset]);
+ }
+ json.EndArray();
+}
+
+template
+static void print_uca_weights_legacy(J &json, int codepoint, uint16 **weights, uchar *lengths)
+{
+ uint16 *page = weights[codepoint >> 8];
+ if (page == NULL)
+ return;
+
+ int offset = codepoint & 0xFF;
+ uint16 *w = page + offset * lengths[codepoint >> 8];
+ if (!w[0])
+ return;
+
+ char key[32];
+ snprintf(key, sizeof(key), "U+%04X", codepoint);
+
+ json.Key(key);
+ json.StartArray();
+ for (; w[0]; w++)
+ {
+ json.Uint(w[0]);
+ }
+ json.EndArray();
+}
+
+template
+static void print_array_uchar(J &json, const uchar *arr, size_t len)
+{
+ json.StartArray();
+ for (size_t i = 0; i < len; ++i)
+ {
+ json.Uint(arr[i]);
+ }
+ json.EndArray();
+}
+
+template
+static void print_array_uint16(J &json, const uint16 *arr, size_t len)
+{
+ json.StartArray();
+ for (size_t i = 0; i < len; ++i)
+ {
+ json.Uint(arr[i]);
+ }
+ json.EndArray();
+}
+
+static CHARSET_INFO *init_collation(const char *name)
+{
+ MY_CHARSET_LOADER loader;
+ return my_collation_get_by_name(&loader, name, MYF(0));
+}
+
+#define MY_UCA_MAXCHAR (0x10FFFF + 1)
+#define MY_UCA_CHARS_PER_PAGE 256
+
+extern MY_COLLATION_HANDLER my_collation_uca_900_handler;
+extern MY_COLLATION_HANDLER my_collation_any_uca_handler;
+extern MY_COLLATION_HANDLER my_collation_utf16_uca_handler;
+extern MY_COLLATION_HANDLER my_collation_utf32_uca_handler;
+extern MY_COLLATION_HANDLER my_collation_ucs2_uca_handler;
+
+struct KNOWN_HANDLER
+{
+ const char *name;
+ const MY_COLLATION_HANDLER *h;
+};
+
+static KNOWN_HANDLER known_handlers[] = {
+ {"8bit_bin", &my_collation_8bit_bin_handler},
+ {"8bit_simple_ci", &my_collation_8bit_simple_ci_handler},
+ {"any_uca", &my_collation_any_uca_handler},
+ {"uca_900", &my_collation_uca_900_handler},
+ {"utf16_uca", &my_collation_utf16_uca_handler},
+ {"utf32_uca", &my_collation_utf32_uca_handler},
+ {"ucs2_uca", &my_collation_ucs2_uca_handler},
+};
+
+static int dumpall(const char *dumppath)
+{
+ char pathbuf[4096];
+ char jsonbuf[4096 * 4];
+
+ // bootstrap the `all_charsets` collation array
+ init_collation("utf8mb4_0900_ai_ci");
+
+ for (const CHARSET_INFO *charset : all_charsets)
+ {
+ if (!charset || (charset->state & MY_CS_AVAILABLE) == 0)
+ continue;
+
+ charset = init_collation(charset->m_coll_name);
+ snprintf(pathbuf, sizeof(pathbuf), "%s/%s.json", dumppath, charset->m_coll_name);
+
+ FILE *jsonfile = fopen(pathbuf, "w");
+ if (jsonfile == NULL)
+ {
+ fprintf(stderr, "failed to create '%s'\n", pathbuf);
+ return 1;
+ }
+
+ rapidjson::FileWriteStream os(jsonfile, jsonbuf, sizeof(jsonbuf));
+ rapidjson::Writer, rapidjson::ASCII<>> json(os);
+
+ json.StartObject();
+ json.Key("Name");
+ json.String(charset->m_coll_name);
+ json.Key("Charset");
+ json.String(charset->csname);
+ json.Key("Number");
+ json.Uint(charset->number);
+
+ json.Key("Flags");
+ json.StartObject();
+
+ json.Key("Binary");
+ json.Bool((charset->state & MY_CS_BINSORT) != 0);
+ json.Key("ASCII");
+ json.Bool((charset->state & MY_CS_PUREASCII) != 0);
+ json.Key("Default");
+ json.Bool((charset->state & MY_CS_PRIMARY) != 0);
+
+ json.EndObject();
+
+ for (const KNOWN_HANDLER &handler : known_handlers)
+ {
+ if (charset->coll == handler.h)
+ {
+ json.Key("CollationImpl");
+ json.String(handler.name);
+ break;
+ }
+ }
+
+ if (charset->ctype != NULL)
+ {
+ json.Key("CType");
+ print_array_uchar(json, charset->ctype, 256);
+ }
+
+ if (charset->to_lower != NULL)
+ {
+ json.Key("ToLower");
+ print_array_uchar(json, charset->to_lower, 256);
+ }
+
+ if (charset->to_upper != NULL)
+ {
+ json.Key("ToUpper");
+ print_array_uchar(json, charset->to_upper, 256);
+ }
+
+ if (charset->tab_to_uni != NULL)
+ {
+ json.Key("TabToUni");
+ print_array_uint16(json, charset->tab_to_uni, 256);
+ }
+
+ if (charset->tab_from_uni != NULL)
+ {
+ json.Key("TabFromUni");
+ print_unipages(json, charset->tab_from_uni);
+ }
+
+ if (charset->sort_order != NULL)
+ {
+ json.Key("SortOrder");
+ print_array_uchar(json, charset->sort_order, 256);
+ }
+
+ if (charset->uca != NULL)
+ {
+ MY_UCA_INFO *uca = charset->uca;
+
+ json.Key("UCAVersion");
+
+ switch (uca->version)
+ {
+ case UCA_V400:
+ json.Uint(400);
+ break;
+ case UCA_V520:
+ json.Uint(520);
+ break;
+ case UCA_V900:
+ json.Uint(900);
+ break;
+ default:
+ json.Uint(0);
+ break;
+ }
+
+ json.Key("Weights");
+ json.StartObject();
+ if (uca->version == UCA_V900)
+ {
+ for (my_wc_t cp = 0; cp < MY_UCA_MAXCHAR; cp++)
+ {
+ print_uca_weights_900(json, cp, uca->weights);
+ }
+ }
+ else
+ {
+ for (my_wc_t cp = 0; cp < uca->maxchar; cp++)
+ {
+ print_uca_weights_legacy(json, cp, uca->weights, uca->lengths);
+ }
+ }
+ json.EndObject();
+
+ if (uca->have_contractions)
+ {
+ json.Key("Contractions");
+ print_contractions(json, uca->contraction_nodes);
+ }
+ }
+
+ if (charset->coll_param != NULL)
+ {
+ json.Key("UppercaseFirst");
+ json.Bool(charset->coll_param->case_first == CASE_FIRST_UPPER);
+
+ if (charset->coll_param->reorder_param != NULL)
+ {
+ json.Key("Reorder");
+ print_reorder_params(json, charset->coll_param->reorder_param);
+ }
+ }
+
+ json.EndObject();
+ os.Flush();
+ fclose(jsonfile);
+ }
+ return 0;
+}
+
+int main(int argc, char **argv)
+{
+ if (argc < 2)
+ {
+ fprintf(stderr, "usage: %s \n", argv[0]);
+ return 1;
+ }
+
+ return dumpall(argv[1]);
+}
\ No newline at end of file
diff --git a/go/mysql/collations/tools/colldump/colldump.sh b/go/mysql/collations/tools/colldump/colldump.sh
new file mode 100755
index 00000000000..fe6d1d9d7d2
--- /dev/null
+++ b/go/mysql/collations/tools/colldump/colldump.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+cd "$(dirname "$0")"
+docker build --tag mysql-collation-data .
+
+imgid=$(docker create mysql-collation-data)
+docker cp $imgid:/mysql-collations/. ../../testdata/mysqldata
+docker rm -v $imgid
\ No newline at end of file
diff --git a/go/mysql/collations/tools/makecolldata/codegen/codegen.go b/go/mysql/collations/tools/makecolldata/codegen/codegen.go
index cc2d5ad3a90..4fa98f2afd1 100644
--- a/go/mysql/collations/tools/makecolldata/codegen/codegen.go
+++ b/go/mysql/collations/tools/makecolldata/codegen/codegen.go
@@ -24,6 +24,7 @@ import (
"path"
"reflect"
"sort"
+ "time"
"vitess.io/vitess/go/tools/codegen"
)
@@ -64,10 +65,29 @@ func Merge(gens ...*Generator) *Generator {
return result
}
+const licenseFileHeader = `/*
+Copyright %d The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+`
+
func (g *Generator) WriteToFile(out string) {
var file, fmtfile bytes.Buffer
file.Grow(g.Buffer.Len() + 1024)
+ fmt.Fprintf(&file, licenseFileHeader, time.Now().Year())
fmt.Fprintf(&file, "// Code generated by %s DO NOT EDIT\n\n", path.Base(os.Args[0]))
fmt.Fprintf(&file, "package %s\n\n", g.local.Name())
fmt.Fprintf(&file, "import (\n")
diff --git a/go/mysql/collations/tools/makecolldata/codegen/tablegen.go b/go/mysql/collations/tools/makecolldata/codegen/tablegen.go
index 787d41293be..b12d32f59d7 100644
--- a/go/mysql/collations/tools/makecolldata/codegen/tablegen.go
+++ b/go/mysql/collations/tools/makecolldata/codegen/tablegen.go
@@ -24,7 +24,6 @@ import (
"log"
"math/bits"
"os"
- "reflect"
"vitess.io/vitess/go/mysql/collations/internal/uca"
)
@@ -91,7 +90,6 @@ func (pg *EmbedPageGenerator) WritePage16(g *Generator, varname string, values [
func (pg *EmbedPageGenerator) WriteTrailer(g *Generator, embedfile string) {
unsafe := Package("unsafe")
- reflect := Package("reflect")
g.UsePackage("embed")
g.P()
@@ -99,7 +97,7 @@ func (pg *EmbedPageGenerator) WriteTrailer(g *Generator, embedfile string) {
g.P("var weightsUCA_embed_data string")
g.P()
g.P("func weightsUCA_embed(pos, length int) []uint16 {")
- g.P("return (*[0x7fff0000]uint16)(", unsafe, ".Pointer((*", reflect, ".StringHeader)(", unsafe, ".Pointer(&weightsUCA_embed_data)).Data))[pos:pos+length]")
+ g.P("return (*[0x7fff0000]uint16)(", unsafe, ".Pointer(", unsafe, ".StringData(weightsUCA_embed_data)))[pos:pos+length]")
g.P("}")
}
@@ -126,23 +124,12 @@ type entry struct {
weights []uint16
}
-func (e *entry) adjustHangulWeights(tb *TableGenerator, jamos []rune) {
- for _, jamo := range jamos {
- _, entry := tb.entryForCodepoint(jamo)
- e.weights = append(e.weights, entry.weights[0], entry.weights[1], entry.weights[2]+1)
- }
-}
-
type page struct {
n int
entryCount int
entries [uca.CodepointsPerPage]entry
}
-func (p *page) equals(other *page) bool {
- return reflect.DeepEqual(p, other)
-}
-
func (p *page) name(uca string) string {
if p.entryCount == 0 {
panic("cannot name empty page")
diff --git a/go/mysql/collations/tools/makecolldata/main.go b/go/mysql/collations/tools/makecolldata/main.go
index 0bcbd1ecb2b..ee559a886b5 100644
--- a/go/mysql/collations/tools/makecolldata/main.go
+++ b/go/mysql/collations/tools/makecolldata/main.go
@@ -106,7 +106,7 @@ func (all AllMetadata) get(name string) *CollationMetadata {
return nil
}
-const PkgCollations codegen.Package = "vitess.io/vitess/go/mysql/collations"
+const PkgCollationsData codegen.Package = "vitess.io/vitess/go/mysql/collations/colldata"
const PkgCharset codegen.Package = "vitess.io/vitess/go/mysql/collations/charset"
func main() {
@@ -114,5 +114,5 @@ func main() {
metadata := loadMysqlMetadata()
maketables(*Embed, ".", metadata)
makeversions(".")
- makemysqldata(".", metadata)
+ makemysqldata("colldata", ".", metadata)
}
diff --git a/go/mysql/collations/tools/makecolldata/maketables.go b/go/mysql/collations/tools/makecolldata/maketables.go
index 8ac2f9049ce..055162401bb 100644
--- a/go/mysql/collations/tools/makecolldata/maketables.go
+++ b/go/mysql/collations/tools/makecolldata/maketables.go
@@ -39,7 +39,7 @@ func maketable(g *codegen.Generator, table string, collation *CollationMetadata,
func maketables(embed bool, output string, metadata AllMetadata) {
var pages = codegen.NewPageGenerator(embed)
- var g = codegen.NewGenerator("vitess.io/vitess/go/mysql/collations")
+ var g = codegen.NewGenerator("vitess.io/vitess/go/mysql/collations/colldata")
var fastg = codegen.NewGenerator("vitess.io/vitess/go/mysql/collations/internal/uca")
tablegen := maketable(g, "uca900", metadata.get("utf8mb4_0900_ai_ci"), pages, uca.Layout_uca900{})
@@ -53,9 +53,9 @@ func maketables(embed bool, output string, metadata AllMetadata) {
if pages, ok := pages.(*codegen.EmbedPageGenerator); ok {
pages.WriteTrailer(g, "mysqlucadata.bin")
- pages.WriteToFile(path.Join(output, "mysqlucadata.bin"))
+ pages.WriteToFile(path.Join(output, "colldata/mysqlucadata.bin"))
}
- g.WriteToFile(path.Join(output, "mysqlucadata.go"))
+ g.WriteToFile(path.Join(output, "colldata/mysqlucadata.go"))
fastg.WriteToFile(path.Join(output, "internal/uca/fasttables.go"))
}
diff --git a/go/mysql/collations/tools/makecolldata/mysqldata.go b/go/mysql/collations/tools/makecolldata/mysqldata.go
index 351e578d2af..567f04362de 100644
--- a/go/mysql/collations/tools/makecolldata/mysqldata.go
+++ b/go/mysql/collations/tools/makecolldata/mysqldata.go
@@ -353,12 +353,12 @@ func (g *Generator) printCollationMultibyte(meta *CollationMetadata) {
g.P("},")
}
-func makemysqldata(output string, metadata AllMetadata) {
+func makemysqldata(output string, supportedOutput string, metadata AllMetadata) {
var unsupportedByCharset = make(map[string][]string)
var g = Generator{
- Generator: codegen.NewGenerator(PkgCollations),
+ Generator: codegen.NewGenerator(PkgCollationsData),
Tables: TableGenerator{
- Generator: codegen.NewGenerator(PkgCollations),
+ Generator: codegen.NewGenerator(PkgCollationsData),
dedup: make(map[string]string),
baseWeightsUca400: metadata.get("utf8mb4_unicode_ci").Weights,
baseWeightsUca520: metadata.get("utf8mb4_unicode_520_ci").Weights,
@@ -366,15 +366,22 @@ func makemysqldata(output string, metadata AllMetadata) {
},
}
+ var h = Generator{
+ Generator: codegen.NewGenerator("vitess.io/vitess/go/mysql/collations"),
+ }
+
g.P("var collationsById = [...]Collation{")
+ h.P("var supported = [...]string{")
for _, meta := range metadata {
switch {
case meta.Name == "utf8mb4_0900_bin":
g.P(uint(309), ": &Collation_utf8mb4_0900_bin{},")
+ h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",")
case meta.Name == "binary":
g.P(uint(63), ": &Collation_binary{},")
+ h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",")
case meta.Name == "tis620_bin":
// explicitly unsupported for now because of not accurate results
@@ -384,24 +391,31 @@ func makemysqldata(output string, metadata AllMetadata) {
meta.CollationImpl == "utf32_uca" ||
meta.CollationImpl == "ucs2_uca":
g.printCollationUcaLegacy(meta)
+ h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",")
case meta.CollationImpl == "uca_900":
g.printCollationUca900(meta)
+ h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",")
case meta.CollationImpl == "8bit_bin" || meta.CollationImpl == "8bit_simple_ci":
g.printCollation8bit(meta)
+ h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",")
case meta.Name == "gb18030_unicode_520_ci":
g.printCollationUcaLegacy(meta)
+ h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",")
case charset.IsMultibyteByName(meta.Charset):
g.printCollationMultibyte(meta)
+ h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",")
case strings.HasSuffix(meta.Name, "_bin") && charset.IsUnicodeByName(meta.Charset):
g.printCollationUnicode(meta)
+ h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",")
case strings.HasSuffix(meta.Name, "_general_ci"):
g.printCollationUnicode(meta)
+ h.P(meta.Number, ": ", codegen.Quote(meta.Name), ",")
default:
unsupportedByCharset[meta.Charset] = append(unsupportedByCharset[meta.Charset], meta.Name)
@@ -409,7 +423,9 @@ func makemysqldata(output string, metadata AllMetadata) {
}
g.P("}")
+ h.P("}")
codegen.Merge(g.Tables.Generator, g.Generator).WriteToFile(path.Join(output, "mysqldata.go"))
+ h.WriteToFile(path.Join(supportedOutput, "supported.go"))
var unhandledCount int
for impl, collations := range unsupportedByCharset {
diff --git a/go/mysql/collations/tools/makecolldata/mysqlversions.go b/go/mysql/collations/tools/makecolldata/mysqlversions.go
index 5bdd3165e53..f0578ecd95b 100644
--- a/go/mysql/collations/tools/makecolldata/mysqlversions.go
+++ b/go/mysql/collations/tools/makecolldata/mysqlversions.go
@@ -60,6 +60,7 @@ func makeversions(output string) {
}
sort.Strings(versionfiles)
+ charsets := make(map[string]string)
versioninfo := make(map[uint]*versionInfo)
for v, versionCsv := range versionfiles {
f, err := os.Open(versionCsv)
@@ -89,14 +90,17 @@ func makeversions(output string) {
collname := cols[0]
vi.alias[collname] |= 1 << v
+ charsets[collname] = cols[1]
for from, to := range CharsetAliases {
if strings.HasPrefix(collname, from+"_") {
aliased := strings.Replace(collname, from+"_", to+"_", 1)
+ charsets[aliased] = to
vi.alias[aliased] |= 1 << v
}
if strings.HasPrefix(collname, to+"_") {
aliased := strings.Replace(collname, to+"_", from+"_", 1)
+ charsets[aliased] = from
vi.alias[aliased] |= 1 << v
}
}
@@ -123,7 +127,7 @@ func makeversions(output string) {
var g = codegen.NewGenerator("vitess.io/vitess/go/mysql/collations")
g.P("type collver byte")
- g.P("type collalias struct { mask collver; name string }")
+ g.P("type collalias struct { mask collver; name string; charset string }")
g.P("const (")
g.P("collverInvalid collver = 0")
for n, version := range versions {
@@ -150,7 +154,7 @@ func makeversions(output string) {
// all MySQL versions, but this is implemented as a method on `collver` so when
// MySQL maps utf8 to utfmb4, we can perform the mapping only for the specific
// MySQL version onwards.
- g.P("func (v collver) charsetAliases() map[string]string { return ", fmt.Sprintf("%#v", CharsetAliases), "}")
+ g.P("func charsetAliases() map[string]string { return ", fmt.Sprintf("%#v", CharsetAliases), "}")
g.P()
g.P("var globalVersionInfo = map[ID]struct{alias []collalias; isdefault collver}{")
@@ -164,14 +168,14 @@ func makeversions(output string) {
for _, vi := range sorted {
var reverse []alias
for a, m := range vi.alias {
- reverse = append(reverse, alias{m, a})
+ reverse = append(reverse, alias{mask: m, name: a})
}
sort.Slice(reverse, func(i, j int) bool {
return reverse[i].name < reverse[j].name
})
fmt.Fprintf(g, "%d: {alias: []collalias{", vi.id)
for _, a := range reverse {
- fmt.Fprintf(g, "{0b%08b, %q},", a.mask, a.name)
+ fmt.Fprintf(g, "{0b%08b, %q, %q},", a.mask, a.name, charsets[a.name])
}
fmt.Fprintf(g, "}, isdefault: 0b%08b},\n", vi.isdefault)
}
diff --git a/go/mysql/collations/tools/maketestdata/maketestdata.go b/go/mysql/collations/tools/maketestdata/maketestdata.go
index e8cb0daee5d..67d5a4739f6 100644
--- a/go/mysql/collations/tools/maketestdata/maketestdata.go
+++ b/go/mysql/collations/tools/maketestdata/maketestdata.go
@@ -30,6 +30,8 @@ import (
"github.com/spf13/pflag"
+ "vitess.io/vitess/go/mysql/collations/colldata"
+
"vitess.io/vitess/go/internal/flag"
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/mysql/collations/charset"
@@ -166,17 +168,17 @@ func main() {
flag.Parse(fs)
var defaults = collations.Local()
- var collationsForLanguage = make(map[testutil.Lang][]collations.Collation)
- var allcollations = defaults.AllCollations()
+ var collationsForLanguage = make(map[testutil.Lang][]collations.ID)
+ var allcollations = colldata.All(defaults)
for lang := range testutil.KnownLanguages {
for _, coll := range allcollations {
if lang.MatchesCollation(coll.Name()) {
- collationsForLanguage[lang] = append(collationsForLanguage[lang], coll)
+ collationsForLanguage[lang] = append(collationsForLanguage[lang], coll.ID())
}
}
}
- var rootCollations = []collations.Collation{
+ var rootCollations = []collations.ID{
defaults.LookupByName("utf8mb4_0900_as_cs"),
defaults.LookupByName("utf8mb4_0900_as_ci"),
defaults.LookupByName("utf8mb4_0900_ai_ci"),
@@ -211,21 +213,22 @@ func main() {
var total int
var collationNames []string
- var interestingCollations []collations.Collation
+ var interestingCollations []collations.ID
interestingCollations = append(interestingCollations, rootCollations...)
interestingCollations = append(interestingCollations, collationsForLanguage[lang]...)
for _, collation := range interestingCollations {
- transcoded, err := charset.ConvertFromUTF8(nil, collation.Charset(), []byte(snippet))
+ transcoded, err := charset.ConvertFromUTF8(nil, colldata.Lookup(collation).Charset(), []byte(snippet))
if err != nil {
- log.Printf("[%s] skip collation %s", lang, collation.Name())
+ log.Printf("[%s] skip collation %s", lang, defaults.LookupName(collation))
continue
}
- weights := colldump(collation.Name(), transcoded)
- gcase.Weights[collation.Name()] = weights
+ colName := defaults.LookupName(collation)
+ weights := colldump(colName, transcoded)
+ gcase.Weights[colName] = weights
total += len(weights)
- collationNames = append(collationNames, collation.Name())
+ collationNames = append(collationNames, colName)
}
log.Printf("[%s] written samples for %d collations (%.02fkb): %s",
diff --git a/go/mysql/conn.go b/go/mysql/conn.go
index b12f4907c0c..1a8efe8978d 100644
--- a/go/mysql/conn.go
+++ b/go/mysql/conn.go
@@ -30,6 +30,8 @@ import (
"sync/atomic"
"time"
+ "vitess.io/vitess/go/mysql/sqlerror"
+
"vitess.io/vitess/go/vt/topo/topoproto"
"vitess.io/vitess/go/mysql/collations"
@@ -216,6 +218,19 @@ type Conn struct {
// See: ConnParams.EnableQueryInfo
enableQueryInfo bool
+ // keepAliveOn marks when keep alive is active on the connection.
+ // This is currently used for testing.
+ keepAliveOn bool
+
+ // mu protects the fields below
+ mu sync.Mutex
+ // cancel keep the cancel function for the current executing query.
+ // this is used by `kill [query|connection] ID` command from other connection.
+ cancel context.CancelFunc
+ // this is used to mark the connection to be closed so that the command phase for the connection can be stopped and
+ // the connection gets closed.
+ closing bool
+
// AccountType is a flag about account authority, inlude rw ro rs
AccountType int8
@@ -281,10 +296,21 @@ func newConn(conn net.Conn) *Conn {
// the server is shutting down, and has the ability to control buffer
// size for reads.
func newServerConn(conn net.Conn, listener *Listener) *Conn {
+ // Enable KeepAlive on TCP connections and change keep-alive period if provided.
+ enabledKeepAlive := false
+ if tcpConn, ok := conn.(*net.TCPConn); ok {
+ if err := setTcpConnProperties(tcpConn, listener.connKeepAlivePeriod); err != nil {
+ log.Errorf("error in setting tcp properties: %v", err)
+ } else {
+ enabledKeepAlive = true
+ }
+ }
+
c := &Conn{
conn: conn,
listener: listener,
PrepareData: make(map[uint32]*PrepareData),
+ keepAliveOn: enabledKeepAlive,
}
if listener.connReadBufferSize > 0 {
@@ -302,6 +328,22 @@ func newServerConn(conn net.Conn, listener *Listener) *Conn {
return c
}
+func setTcpConnProperties(conn *net.TCPConn, keepAlivePeriod time.Duration) error {
+ if err := conn.SetKeepAlive(true); err != nil {
+ return vterrors.Wrapf(err, "unable to enable keepalive on tcp connection")
+ }
+
+ if keepAlivePeriod <= 0 {
+ return nil
+ }
+
+ if err := conn.SetKeepAlivePeriod(keepAlivePeriod); err != nil {
+ return vterrors.Wrapf(err, "unable to set keepalive period on tcp connection")
+ }
+
+ return nil
+}
+
// startWriterBuffering starts using buffered writes. This should
// be terminated by a call to endWriteBuffering.
func (c *Conn) startWriterBuffering() {
@@ -594,7 +636,7 @@ func (c *Conn) readPacket() ([]byte, error) {
func (c *Conn) ReadPacket() ([]byte, error) {
result, err := c.readPacket()
if err != nil {
- return nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err)
+ return nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err)
}
return result, err
}
@@ -718,7 +760,7 @@ func (c *Conn) writeComQuit() error {
data, pos := c.startEphemeralPacketWithHeader(1)
data[pos] = ComQuit
if err := c.writeEphemeralPacket(); err != nil {
- return NewSQLError(CRServerGone, SSUnknownSQLState, err.Error())
+ return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, err.Error())
}
return nil
}
@@ -806,7 +848,7 @@ func (c *Conn) writeOKPacketWithHeader(packetOk *PacketOK, headerType byte) erro
bytes, pos := c.startEphemeralPacketWithHeader(length)
data := &coder{data: bytes, pos: pos}
- data.writeByte(headerType) //header - OK or EOF
+ data.writeByte(headerType) // header - OK or EOF
data.writeLenEncInt(packetOk.affectedRows)
data.writeLenEncInt(packetOk.lastInsertID)
data.writeUint16(packetOk.statusFlags)
@@ -856,10 +898,10 @@ func getLenEncInt(i uint64) []byte {
}
func (c *Conn) WriteErrorAndLog(format string, args ...interface{}) bool {
- return c.writeErrorAndLog(ERUnknownComError, SSNetError, format, args...)
+ return c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, format, args...)
}
-func (c *Conn) writeErrorAndLog(errorCode ErrorCode, sqlState string, format string, args ...any) bool {
+func (c *Conn) writeErrorAndLog(errorCode sqlerror.ErrorCode, sqlState string, format string, args ...any) bool {
if err := c.writeErrorPacket(errorCode, sqlState, format, args...); err != nil {
log.Errorf("Error writing error to %s: %v", c, err)
return false
@@ -879,7 +921,7 @@ func (c *Conn) writeErrorPacketFromErrorAndLog(err error) bool {
// writeErrorPacket writes an error packet.
// Server -> Client.
// This method returns a generic error, not a SQLError.
-func (c *Conn) writeErrorPacket(errorCode ErrorCode, sqlState string, format string, args ...any) error {
+func (c *Conn) writeErrorPacket(errorCode sqlerror.ErrorCode, sqlState string, format string, args ...any) error {
errorMessage := fmt.Sprintf(format, args...)
length := 1 + 2 + 1 + 5 + len(errorMessage)
data, pos := c.startEphemeralPacketWithHeader(length)
@@ -887,7 +929,7 @@ func (c *Conn) writeErrorPacket(errorCode ErrorCode, sqlState string, format str
pos = writeUint16(data, pos, uint16(errorCode))
pos = writeByte(data, pos, '#')
if sqlState == "" {
- sqlState = SSUnknownSQLState
+ sqlState = sqlerror.SSUnknownSQLState
}
if len(sqlState) != 5 {
panic("sqlState has to be 5 characters long")
@@ -901,11 +943,11 @@ func (c *Conn) writeErrorPacket(errorCode ErrorCode, sqlState string, format str
// writeErrorPacketFromError writes an error packet, from a regular error.
// See writeErrorPacket for other info.
func (c *Conn) writeErrorPacketFromError(err error) error {
- if se, ok := err.(*SQLError); ok {
+ if se, ok := err.(*sqlerror.SQLError); ok {
return c.writeErrorPacket(se.Num, se.State, "%v", se.Message)
}
- return c.writeErrorPacket(ERUnknownError, SSUnknownSQLState, "unknown error: %v", err)
+ return c.writeErrorPacket(sqlerror.ERUnknownError, sqlerror.SSUnknownSQLState, "unknown error: %v", err)
}
// writeEOFPacket writes an EOF packet, through the buffer, and
@@ -938,6 +980,10 @@ func (c *Conn) handleNextCommand(handler Handler) bool {
if len(data) == 0 {
return false
}
+ // before continue to process the packet, check if the connection should be closed or not.
+ if c.IsMarkedForClose() {
+ return false
+ }
if c.CrossEnable || c.AttachEnable {
if err = handler.CheckAttachedHost(c); err != nil {
@@ -1033,7 +1079,7 @@ func (c *Conn) handleNextCommand(handler Handler) bool {
default:
log.Errorf("Got unhandled packet (default) from %s, returning error: %v", c, data)
c.recycleReadPacket()
- if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "command handling not implemented yet: %v", data[0]) {
+ if !c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, "command handling not implemented yet: %v", data[0]) {
return false
}
}
@@ -1136,7 +1182,7 @@ func (c *Conn) handleComStmtReset(data []byte) bool {
c.recycleReadPacket()
if !ok {
log.Error("Got unhandled packet from client %v, returning error: %v", c.ConnectionID, data)
- if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "error handling packet: %v", data) {
+ if !c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, "error handling packet: %v", data) {
return false
}
}
@@ -1144,7 +1190,7 @@ func (c *Conn) handleComStmtReset(data []byte) bool {
prepare, ok := c.PrepareData[stmtID]
if !ok {
log.Error("Commands were executed in an improper order from client %v, packet: %v", c.ConnectionID, data)
- if !c.writeErrorAndLog(CRCommandsOutOfSync, SSNetError, "commands were executed in an improper order: %v", data) {
+ if !c.writeErrorAndLog(sqlerror.CRCommandsOutOfSync, sqlerror.SSNetError, "commands were executed in an improper order: %v", data) {
return false
}
}
@@ -1166,7 +1212,7 @@ func (c *Conn) handleComStmtSendLongData(data []byte) bool {
if c.CrossEnable || c.AttachEnable {
defer c.recycleReadPacket()
if err := c.crossTabletConn.writePacketNoHeader(data); err != nil {
- if err := c.writeErrorPacket(ERUnknownComError, SSNetError, "command handling not implemented yet: %v", data[0]); err != nil {
+ if err := c.writeErrorPacket(sqlerror.ERUnknownComError, sqlerror.SSNetError, "command handling not implemented yet: %v", data[0]); err != nil {
log.Errorf("Error writing error packet to client: %v", err)
return false
}
@@ -1174,7 +1220,7 @@ func (c *Conn) handleComStmtSendLongData(data []byte) bool {
}
if err := c.crossTabletConn.endWriterBuffering(); err != nil {
- if err := c.writeErrorPacket(ERUnknownComError, SSNetError, "command handling not implemented yet: %v", data[0]); err != nil {
+ if err := c.writeErrorPacket(sqlerror.ERUnknownComError, sqlerror.SSNetError, "command handling not implemented yet: %v", data[0]); err != nil {
log.Errorf("Error writing error packet to client: %v", err)
return false
}
@@ -1224,7 +1270,7 @@ func (c *Conn) handleComStmtExecute(handler Handler, data []byte) (kontinue bool
c.recycleReadPacket()
err := c.crossTabletConn.ptComStmtExecute(edata, c)
if err != nil {
- if err := c.writeErrorPacket(ERUnknownComError, SSNetError, "command handling not implemented yet: %v", edata[0]); err != nil {
+ if err := c.writeErrorPacket(sqlerror.ERUnknownComError, sqlerror.SSNetError, "command handling not implemented yet: %v", edata[0]); err != nil {
log.Errorf("Error writing error packet to client: %v", err)
return false
}
@@ -1285,7 +1331,7 @@ func (c *Conn) handleComStmtExecute(handler Handler, data []byte) (kontinue bool
if !fieldSent {
// This is just a failsafe. Should never happen.
if err == nil || err == io.EOF {
- err = NewSQLErrorFromError(errors.New("unexpected: query ended without no results and no error"))
+ err = sqlerror.NewSQLErrorFromError(errors.New("unexpected: query ended without no results and no error"))
}
if !c.writeErrorPacketFromErrorAndLog(err) {
return false
@@ -1331,7 +1377,7 @@ func (c *Conn) handleComPrepare(handler Handler, data []byte) (kontinue bool) {
data[0] = ComPrepare
copy(data[1:], query)
if err := c.crossTabletConn.ptComPrepare(data, c); err != nil {
- if err := c.writeErrorPacket(ERUnknownComError, SSNetError, "command handling not implemented yet: %v", data[0]); err != nil {
+ if err := c.writeErrorPacket(sqlerror.ERUnknownComError, sqlerror.SSNetError, "command handling not implemented yet: %v", data[0]); err != nil {
log.Errorf("Error writing error packet to client: %v", err)
return false
}
@@ -1427,7 +1473,7 @@ func (c *Conn) handleComSetOption(data []byte) bool {
c.Capabilities &^= CapabilityClientMultiStatements
default:
log.Errorf("Got unhandled packet (ComSetOption default) from client %v, returning error: %v", c.ConnectionID, data)
- if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "error handling packet: %v", data) {
+ if !c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, "error handling packet: %v", data) {
return false
}
}
@@ -1437,7 +1483,7 @@ func (c *Conn) handleComSetOption(data []byte) bool {
}
} else {
log.Errorf("Got unhandled packet (ComSetOption else) from client %v, returning error: %v", c.ConnectionID, data)
- if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "error handling packet: %v", data) {
+ if !c.writeErrorAndLog(sqlerror.ERUnknownComError, sqlerror.SSNetError, "error handling packet: %v", data) {
return false
}
}
@@ -1448,7 +1494,7 @@ func (c *Conn) handleComPing() bool {
c.recycleReadPacket()
if c.CrossEnable || c.AttachEnable {
if err := c.crossTabletConn.Ping(); err != nil {
- if err := c.writeErrorPacket(ERUnknownComError, SSNetError, "command handling not implemented yet: %v", err); err != nil {
+ if err := c.writeErrorPacket(sqlerror.ERUnknownComError, sqlerror.SSNetError, "command handling not implemented yet: %v", err); err != nil {
log.Errorf("Error writing error packet to client: %v", err)
return false
}
@@ -1461,7 +1507,7 @@ func (c *Conn) handleComPing() bool {
}
// Return error if listener was shut down and OK otherwise
if c.listener.shutdown.Load() {
- if !c.writeErrorAndLog(ERServerShutdown, SSNetError, "Server shutdown in progress") {
+ if !c.writeErrorAndLog(sqlerror.ERServerShutdown, sqlerror.SSNetError, "Server shutdown in progress") {
return false
}
} else {
@@ -1473,7 +1519,7 @@ func (c *Conn) handleComPing() bool {
return true
}
-var errEmptyStatement = NewSQLError(EREmptyQuery, SSClientError, "Query was empty")
+var errEmptyStatement = sqlerror.NewSQLError(sqlerror.EREmptyQuery, sqlerror.SSClientError, "Query was empty")
func (c *Conn) handleComQuery(handler Handler, data []byte) (kontinue bool) {
c.startWriterBuffering()
@@ -1494,7 +1540,7 @@ func (c *Conn) handleComQuery(handler Handler, data []byte) (kontinue bool) {
copy(data[1:], query)
if err := c.crossTabletConn.passthroughComQuery(data, c); err != nil {
- if IsConnErrByCross(err) {
+ if sqlerror.IsConnErrByCross(err) {
return false
}
}
@@ -1580,7 +1626,7 @@ func (c *Conn) execQuery(query string, handler Handler, more bool) execResult {
if !callbackCalled {
// This is just a failsafe. Should never happen.
if err == nil || err == io.EOF {
- err = NewSQLErrorFromError(errors.New("unexpected: query ended without no results and no error"))
+ err = sqlerror.NewSQLErrorFromError(errors.New("unexpected: query ended without no results and no error"))
}
if !c.writeErrorPacketFromErrorAndLog(err) {
return connErr
@@ -1719,7 +1765,6 @@ func (c *Conn) parseOKPacket(in []byte) (*PacketOK, error) {
// we can return the packet.
return packetOK, nil
}
-
// Alright, now we need to read each sub packet from the session state change.
for {
sscType, ok := data.readByte()
@@ -1771,7 +1816,7 @@ func ParseErrorPacket(data []byte) error {
// Error code is 2 bytes.
code, pos, ok := readUint16(data, pos)
if !ok {
- return NewSQLError(CRUnknownError, SSUnknownSQLState, "invalid error packet code: %v", data)
+ return sqlerror.NewSQLError(sqlerror.CRUnknownError, sqlerror.SSUnknownSQLState, "invalid error packet code: %v", data)
}
// '#' marker of the SQL state is 1 byte. Ignored.
@@ -1780,13 +1825,13 @@ func ParseErrorPacket(data []byte) error {
// SQL state is 5 bytes
sqlState, pos, ok := readBytes(data, pos, 5)
if !ok {
- return NewSQLError(CRUnknownError, SSUnknownSQLState, "invalid error packet sqlState: %v", data)
+ return sqlerror.NewSQLError(sqlerror.CRUnknownError, sqlerror.SSUnknownSQLState, "invalid error packet sqlState: %v", data)
}
// Human readable error message is the rest.
msg := string(data[pos:])
- return NewSQLError(ErrorCode(code), string(sqlState), "%v", msg)
+ return sqlerror.NewSQLError(sqlerror.ErrorCode(code), string(sqlState), "%v", msg)
}
// GetTLSClientCerts gets TLS certificates.
@@ -1879,7 +1924,7 @@ func (c *Conn) ptComStmtClose(data []byte, clientConn *Conn) error {
// This is a new command, need to reset the sequence.
c.sequence = 0
if err := c.writePacketNoHeader(data); err != nil {
- return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err)
}
if err := c.endWriterBuffering(); err != nil {
return err
@@ -1891,7 +1936,7 @@ func (c *Conn) ptOnePacket(data []byte, clientConn *Conn) error {
// This is a new command, need to reset the sequence.
c.sequence = 0
if err := c.writePacketNoHeader(data); err != nil {
- return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err)
}
if err := c.endWriterBuffering(); err != nil {
return err
@@ -1919,13 +1964,13 @@ func (c *Conn) passthroughComQuery(data []byte, clientConn *Conn) error {
rdata, err := c.readEphemeralPacket()
if err != nil {
- return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err)
}
wdata := make([]byte, len(rdata))
copy(wdata, rdata)
c.recycleReadPacket()
if len(wdata) == 0 {
- return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "invalid empty COM_QUERY response packet")
+ return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_QUERY response packet")
}
return c.processData(wdata, clientConn)
@@ -1936,7 +1981,7 @@ func (c *Conn) processData(data []byte, clientConn *Conn) error {
switch data[0] {
case ErrPacket:
err := ParseErrorPacket(data)
- if IsConnErr(err) {
+ if sqlerror.IsConnErr(err) {
return err
}
if err := clientConn.writePacketNoHeader(data); err != nil {
@@ -1957,13 +2002,13 @@ func (c *Conn) processData(data []byte, clientConn *Conn) error {
if packetOk.statusFlags&ServerMoreResultsExists == ServerMoreResultsExists {
data, err := c.readEphemeralPacket()
if err != nil {
- return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err)
}
wdata := make([]byte, len(data))
copy(wdata, data)
c.recycleReadPacket()
if len(wdata) == 0 {
- return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "invalid empty COM_QUERY response packet")
+ return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_QUERY response packet")
}
//clientConn.sequence=0
return c.processData(wdata, clientConn)
@@ -2012,10 +2057,10 @@ func (c *Conn) processData(data []byte, clientConn *Conn) error {
colNumber, pos, ok := readLenEncInt(data, 0)
if !ok {
- return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "cannot get column number")
+ return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "cannot get column number")
}
if pos != len(data) {
- return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extra data in COM_QUERY response")
+ return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extra data in COM_QUERY response")
}
if colNumber == 0 {
@@ -2045,7 +2090,7 @@ func (c *Conn) processData(data []byte, clientConn *Conn) error {
// EOF is only present here if it's not deprecated.
eofData, err := c.readEphemeralPacket()
if err != nil {
- return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err)
}
if c.isEOFPacket(eofData) {
// This is what we expect.
@@ -2087,11 +2132,11 @@ func (c *Conn) processData(data []byte, clientConn *Conn) error {
if statusFlag&ServerMoreResultsExists == ServerMoreResultsExists {
data, err := c.readEphemeralPacket()
if err != nil {
- return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err)
}
if len(data) == 0 {
c.recycleReadPacket()
- return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "invalid empty COM_QUERY response packet")
+ return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_QUERY response packet")
}
wdata := make([]byte, len(data))
copy(wdata, data)
@@ -2112,11 +2157,11 @@ func (c *Conn) processData(data []byte, clientConn *Conn) error {
data, err := c.readEphemeralPacket()
if err != nil {
- return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err)
}
if len(data) == 0 {
c.recycleReadPacket()
- return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "invalid empty COM_QUERY response packet")
+ return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_QUERY response packet")
}
wdata := make([]byte, len(data))
copy(wdata, data)
@@ -2142,7 +2187,7 @@ func (c *Conn) ptComPrepare(data []byte, clientConn *Conn) error {
// This is a new command, need to reset the sequence.
c.sequence = 0
if err := c.writePacketNoHeader(data); err != nil {
- return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err)
}
if err := c.endWriterBuffering(); err != nil {
return err
@@ -2156,10 +2201,10 @@ func (c *Conn) ptComPrepare(data []byte, clientConn *Conn) error {
wdata := make([]byte, len(rdata))
copy(wdata, rdata)
if len(wdata) == 0 {
- return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "invalid empty COM_STMT_PREPARE response packet")
+ return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_STMT_PREPARE response packet")
}
if len(wdata) == 0 {
- return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "invalid empty COM_STMT_PREPARE response packet")
+ return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_STMT_PREPARE response packet")
}
c.recycleReadPacket()
@@ -2196,7 +2241,7 @@ func (c *Conn) ptComPrepare(data []byte, clientConn *Conn) error {
// EOF is only present here if it's not deprecated.
_, err := c.readEphemeralPacket()
if err != nil {
- return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err)
}
c.recycleReadPacket()
}
@@ -2223,7 +2268,7 @@ func (c *Conn) ptComPrepare(data []byte, clientConn *Conn) error {
// EOF is only present here if it's not deprecated.
_, err := c.readEphemeralPacket()
if err != nil {
- return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err)
}
c.recycleReadPacket()
}
@@ -2253,7 +2298,7 @@ func (c *Conn) ptComStmtExecute(data []byte, clientConn *Conn) error {
// This is a new command, need to reset the sequence.
c.sequence = 0
if err := c.writePacketNoHeader(data); err != nil {
- return NewSQLError(CRServerGone, SSUnknownSQLState, "%v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "%v", err)
}
if err := c.endWriterBuffering(); err != nil {
return err
@@ -2261,13 +2306,13 @@ func (c *Conn) ptComStmtExecute(data []byte, clientConn *Conn) error {
rdata, err := c.readEphemeralPacket()
if err != nil {
- return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err)
}
wdata := make([]byte, len(rdata))
copy(wdata, rdata)
c.recycleReadPacket()
if len(wdata) == 0 {
- return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "invalid empty COM_QUERY response packet")
+ return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "invalid empty COM_QUERY response packet")
}
switch wdata[0] {
@@ -2288,10 +2333,10 @@ func (c *Conn) ptComStmtExecute(data []byte, clientConn *Conn) error {
colNumber, pos, ok := readLenEncInt(wdata, 0)
if !ok {
- return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "cannot get column number")
+ return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "cannot get column number")
}
if pos != len(wdata) {
- return NewSQLError(CRMalformedPacket, SSUnknownSQLState, "extra data in COM_QUERY response")
+ return sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "extra data in COM_QUERY response")
}
if colNumber == 0 {
@@ -2321,7 +2366,7 @@ func (c *Conn) ptComStmtExecute(data []byte, clientConn *Conn) error {
// EOF is only present here if it's not deprecated.
eofData, err := c.readEphemeralPacket()
if err != nil {
- return NewSQLError(CRServerLost, SSUnknownSQLState, "%v", err)
+ return sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", err)
}
if c.isEOFPacket(eofData) {
// This is what we expect.
@@ -2390,7 +2435,7 @@ func (c *Conn) handleComFieldList(handler Handler, data []byte) bool {
c.recycleReadPacket()
if tableName == "" || err != nil {
log.Error("Got unhandled packet from client %v, returning error: %s", c.ConnectionID, data)
- if err := c.writeErrorPacket(ERUnknownComError, SSUnknownComError, "command handling not implemented yet: %v", data[0]); err != nil {
+ if err := c.writeErrorPacket(sqlerror.ERUnknownComError, sqlerror.SSNetError, "command handling not implemented yet: %v", data[0]); err != nil {
log.Error("Error writing error packet to client: %v", err)
return false
}
@@ -2469,3 +2514,38 @@ func (c *Conn) handleComFieldList(handler Handler, data []byte) bool {
}
return true
}
+
+// CancelCtx aborts an existing running query
+func (c *Conn) CancelCtx() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.cancel != nil {
+ c.cancel()
+ }
+}
+
+// UpdateCancelCtx updates the cancel function on the connection.
+func (c *Conn) UpdateCancelCtx(cancel context.CancelFunc) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.cancel = cancel
+}
+
+// MarkForClose marks the connection for close.
+func (c *Conn) MarkForClose() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.closing = true
+}
+
+// IsMarkedForClose return true if the connection should be closed.
+func (c *Conn) IsMarkedForClose() bool {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.closing
+}
+
+// GetTestConn returns a conn for testing purpose only.
+func GetTestConn() *Conn {
+ return newConn(testConn{})
+}
diff --git a/go/mysql/conn_fake.go b/go/mysql/conn_fake.go
new file mode 100644
index 00000000000..72d944c2f3b
--- /dev/null
+++ b/go/mysql/conn_fake.go
@@ -0,0 +1,83 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mysql
+
+import (
+ "fmt"
+ "net"
+ "time"
+)
+
+// testConn to be used for testing only as net.Conn interface implementation.
+type testConn struct {
+ writeToPass []bool
+ pos int
+ queryPacket []byte
+}
+
+func (t testConn) Read(b []byte) (n int, err error) {
+ copy(b, t.queryPacket)
+ return len(b), nil
+}
+
+func (t testConn) Write(b []byte) (n int, err error) {
+ t.pos = t.pos + 1
+ if t.writeToPass[t.pos] {
+ return 0, nil
+ }
+ return 0, fmt.Errorf("error in writing to connection")
+}
+
+func (t testConn) Close() error {
+ return nil
+}
+
+func (t testConn) LocalAddr() net.Addr {
+ panic("implement me")
+}
+
+func (t testConn) RemoteAddr() net.Addr {
+ return mockAddress{s: "a"}
+}
+
+func (t testConn) SetDeadline(t1 time.Time) error {
+ panic("implement me")
+}
+
+func (t testConn) SetReadDeadline(t1 time.Time) error {
+ panic("implement me")
+}
+
+func (t testConn) SetWriteDeadline(t1 time.Time) error {
+ panic("implement me")
+}
+
+var _ net.Conn = (*testConn)(nil)
+
+type mockAddress struct {
+ s string
+}
+
+func (m mockAddress) Network() string {
+ return m.s
+}
+
+func (m mockAddress) String() string {
+ return m.s
+}
+
+var _ net.Addr = (*mockAddress)(nil)
diff --git a/go/mysql/conn_flaky_test.go b/go/mysql/conn_flaky_test.go
index 4111b32872c..538ab1d622e 100644
--- a/go/mysql/conn_flaky_test.go
+++ b/go/mysql/conn_flaky_test.go
@@ -31,6 +31,8 @@ import (
"testing"
"time"
+ "vitess.io/vitess/go/mysql/replication"
+ "vitess.io/vitess/go/mysql/sqlerror"
"vitess.io/vitess/go/vt/sqlparser"
"github.com/stretchr/testify/assert"
@@ -304,7 +306,7 @@ func TestBasicPackets(t *testing.T) {
assert.EqualValues(78, packetOk.warnings)
// Write error packet, read it, compare.
- err = sConn.writeErrorPacket(ERAccessDeniedError, SSAccessDeniedError, "access denied: %v", "reason")
+ err = sConn.writeErrorPacket(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "access denied: %v", "reason")
require.NoError(err)
data, err = cConn.ReadPacket()
require.NoError(err)
@@ -312,10 +314,10 @@ func TestBasicPackets(t *testing.T) {
assert.EqualValues(data[0], ErrPacket, "ErrPacket")
err = ParseErrorPacket(data)
- utils.MustMatch(t, err, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "access denied: reason"), "")
+ utils.MustMatch(t, err, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "access denied: reason"), "")
// Write error packet from error, read it, compare.
- err = sConn.writeErrorPacketFromError(NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "access denied"))
+ err = sConn.writeErrorPacketFromError(sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "access denied"))
require.NoError(err)
data, err = cConn.ReadPacket()
@@ -324,7 +326,7 @@ func TestBasicPackets(t *testing.T) {
assert.EqualValues(data[0], ErrPacket, "ErrPacket")
err = ParseErrorPacket(data)
- utils.MustMatch(t, err, NewSQLError(ERAccessDeniedError, SSAccessDeniedError, "access denied"), "")
+ utils.MustMatch(t, err, sqlerror.NewSQLError(sqlerror.ERAccessDeniedError, sqlerror.SSAccessDeniedError, "access denied"), "")
// Write EOF packet, read it, compare first byte. Payload is always ignored.
err = sConn.writeEOFPacket(0x8912, 0xabba)
@@ -840,9 +842,9 @@ func TestMultiStatement(t *testing.T) {
// this handler will return results according to the query. In case the query contains "error" it will return an error
// panic if the query contains "panic" and it will return selectRowsResult in case of any other query
- handler := &testRun{t: t, err: NewSQLError(CRMalformedPacket, SSUnknownSQLState, "cannot get column number")}
+ handler := &testRun{t: t, err: sqlerror.NewSQLError(sqlerror.CRMalformedPacket, sqlerror.SSUnknownSQLState, "cannot get column number")}
res := sConn.handleNextCommand(handler)
- //The queries run will be select 1; and select 2; These queries do not return any errors, so the connection should still be open
+ // The queries run will be select 1; and select 2; These queries do not return any errors, so the connection should still be open
require.True(t, res, "we should not break the connection in case of no errors")
// Read the result of the query and assert that it is indeed what we want. This will contain the result of the first query.
data, more, _, err := cConn.ReadQueryResult(100, true)
@@ -992,67 +994,6 @@ func TestConnectionErrorWhileWritingComStmtExecute(t *testing.T) {
require.False(t, res, "we should beak the connection in case of error writing error packet")
}
-var _ Handler = (*testRun)(nil)
-
-type testConn struct {
- writeToPass []bool
- pos int
- queryPacket []byte
-}
-
-func (t testConn) Read(b []byte) (n int, err error) {
- copy(b, t.queryPacket)
- return len(b), nil
-}
-
-func (t testConn) Write(b []byte) (n int, err error) {
- t.pos = t.pos + 1
- if t.writeToPass[t.pos] {
- return 0, nil
- }
- return 0, fmt.Errorf("error in writing to connection")
-}
-
-func (t testConn) Close() error {
- panic("implement me")
-}
-
-func (t testConn) LocalAddr() net.Addr {
- panic("implement me")
-}
-
-func (t testConn) RemoteAddr() net.Addr {
- return mockAddress{s: "a"}
-}
-
-func (t testConn) SetDeadline(t1 time.Time) error {
- panic("implement me")
-}
-
-func (t testConn) SetReadDeadline(t1 time.Time) error {
- panic("implement me")
-}
-
-func (t testConn) SetWriteDeadline(t1 time.Time) error {
- panic("implement me")
-}
-
-var _ net.Conn = (*testConn)(nil)
-
-type mockAddress struct {
- s string
-}
-
-func (m mockAddress) Network() string {
- return m.s
-}
-
-func (m mockAddress) String() string {
- return m.s
-}
-
-var _ net.Addr = (*mockAddress)(nil)
-
var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
func randSeq(n int) string {
@@ -1177,7 +1118,7 @@ func (t testRun) ComBinlogDump(c *Conn, logFile string, binlogPos uint32) error
panic("implement me")
}
-func (t testRun) ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet GTIDSet) error {
+func (t testRun) ComBinlogDumpGTID(c *Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error {
panic("implement me")
}
diff --git a/go/mysql/constants.go b/go/mysql/constants.go
index e57097a80cf..37649cbeea4 100644
--- a/go/mysql/constants.go
+++ b/go/mysql/constants.go
@@ -17,13 +17,6 @@ limitations under the License.
package mysql
import (
- "strconv"
- "strings"
-
- "golang.org/x/text/encoding"
- "golang.org/x/text/encoding/charmap"
- "golang.org/x/text/encoding/simplifiedchinese"
-
"vitess.io/vitess/go/mysql/binlog"
)
@@ -281,390 +274,6 @@ const (
AuthSwitchRequestPacket = 0xfe
)
-// Error codes for client-side errors.
-// Originally found in include/mysql/errmsg.h and
-// https://dev.mysql.com/doc/mysql-errors/en/client-error-reference.html
-const (
- // CRUnknownError is CR_UNKNOWN_ERROR
- CRUnknownError = ErrorCode(2000)
-
- // CRConnectionError is CR_CONNECTION_ERROR
- // This is returned if a connection via a Unix socket fails.
- CRConnectionError = ErrorCode(2002)
-
- // CRConnHostError is CR_CONN_HOST_ERROR
- // This is returned if a connection via a TCP socket fails.
- CRConnHostError = ErrorCode(2003)
-
- // CRUnknownHost is CR_UNKNOWN_HOST
- // This is returned if the host name cannot be resolved.
- CRUnknownHost = ErrorCode(2005)
-
- // CRServerGone is CR_SERVER_GONE_ERROR.
- // This is returned if the client tries to send a command but it fails.
- CRServerGone = ErrorCode(2006)
-
- // CRVersionError is CR_VERSION_ERROR
- // This is returned if the server versions don't match what we support.
- CRVersionError = ErrorCode(2007)
-
- // CRServerHandshakeErr is CR_SERVER_HANDSHAKE_ERR
- CRServerHandshakeErr = ErrorCode(2012)
-
- // CRServerLost is CR_SERVER_LOST.
- // Used when:
- // - the client cannot write an initial auth packet.
- // - the client cannot read an initial auth packet.
- // - the client cannot read a response from the server.
- // This happens when a running query is killed.
- CRServerLost = ErrorCode(2013)
-
- // CRCommandsOutOfSync is CR_COMMANDS_OUT_OF_SYNC
- // Sent when the streaming calls are not done in the right order.
- CRCommandsOutOfSync = ErrorCode(2014)
-
- // CRNamedPipeStateError is CR_NAMEDPIPESETSTATE_ERROR.
- // This is the highest possible number for a connection error.
- CRNamedPipeStateError = ErrorCode(2018)
-
- // CRCantReadCharset is CR_CANT_READ_CHARSET
- CRCantReadCharset = ErrorCode(2019)
-
- // CRSSLConnectionError is CR_SSL_CONNECTION_ERROR
- CRSSLConnectionError = ErrorCode(2026)
-
- // CRMalformedPacket is CR_MALFORMED_PACKET
- CRMalformedPacket = ErrorCode(2027)
-)
-
-type ErrorCode uint16
-
-func (e ErrorCode) ToString() string {
- return strconv.FormatUint(uint64(e), 10)
-}
-
-// Error codes for server-side errors.
-// Originally found in include/mysql/mysqld_error.h and
-// https://dev.mysql.com/doc/mysql-errors/en/server-error-reference.html
-// The below are in sorted order by value, grouped by vterror code they should be bucketed into.
-// See above reference for more information on each code.
-const (
- // Vitess specific errors, (100-999)
- ERNotReplica = ErrorCode(100)
-
- // unknown
- ERUnknownError = ErrorCode(1105)
-
- // internal
- ERInternalError = ErrorCode(1815)
-
- // unimplemented
- ERNotSupportedYet = ErrorCode(1235)
- ERUnsupportedPS = ErrorCode(1295)
-
- // resource exhausted
- ERDiskFull = ErrorCode(1021)
- EROutOfMemory = ErrorCode(1037)
- EROutOfSortMemory = ErrorCode(1038)
- ERConCount = ErrorCode(1040)
- EROutOfResources = ErrorCode(1041)
- ERRecordFileFull = ErrorCode(1114)
- ERHostIsBlocked = ErrorCode(1129)
- ERCantCreateThread = ErrorCode(1135)
- ERTooManyDelayedThreads = ErrorCode(1151)
- ERNetPacketTooLarge = ErrorCode(1153)
- ERTooManyUserConnections = ErrorCode(1203)
- ERLockTableFull = ErrorCode(1206)
- ERUserLimitReached = ErrorCode(1226)
-
- // deadline exceeded
- ERLockWaitTimeout = ErrorCode(1205)
-
- // unavailable
- ERServerShutdown = ErrorCode(1053)
-
- // not found
- ERDbDropExists = ErrorCode(1008)
- ERCantFindFile = ErrorCode(1017)
- ERFormNotFound = ErrorCode(1029)
- ERKeyNotFound = ErrorCode(1032)
- ERBadFieldError = ErrorCode(1054)
- ERNoSuchThread = ErrorCode(1094)
- ERUnknownTable = ErrorCode(1109)
- ERCantFindUDF = ErrorCode(1122)
- ERNonExistingGrant = ErrorCode(1141)
- ERNoSuchTable = ErrorCode(1146)
- ERNonExistingTableGrant = ErrorCode(1147)
- ERKeyDoesNotExist = ErrorCode(1176)
-
- // permissions
- ERDBAccessDenied = ErrorCode(1044)
- ERAccessDeniedError = ErrorCode(1045)
- ERKillDenied = ErrorCode(1095)
- ERNoPermissionToCreateUsers = ErrorCode(1211)
- ERSpecifiedAccessDenied = ErrorCode(1227)
-
- // failed precondition
- ERNoDb = ErrorCode(1046)
- ERNoSuchIndex = ErrorCode(1082)
- ERCantDropFieldOrKey = ErrorCode(1091)
- ERTableNotLockedForWrite = ErrorCode(1099)
- ERTableNotLocked = ErrorCode(1100)
- ERTooBigSelect = ErrorCode(1104)
- ERNotAllowedCommand = ErrorCode(1148)
- ERTooLongString = ErrorCode(1162)
- ERDelayedInsertTableLocked = ErrorCode(1165)
- ERDupUnique = ErrorCode(1169)
- ERRequiresPrimaryKey = ErrorCode(1173)
- ERCantDoThisDuringAnTransaction = ErrorCode(1179)
- ERReadOnlyTransaction = ErrorCode(1207)
- ERCannotAddForeign = ErrorCode(1215)
- ERNoReferencedRow = ErrorCode(1216)
- ERRowIsReferenced = ErrorCode(1217)
- ERCantUpdateWithReadLock = ErrorCode(1223)
- ERNoDefault = ErrorCode(1230)
- ERMasterFatalReadingBinlog = ErrorCode(1236)
- EROperandColumns = ErrorCode(1241)
- ERSubqueryNo1Row = ErrorCode(1242)
- ERUnknownStmtHandler = ErrorCode(1243)
- ERWarnDataOutOfRange = ErrorCode(1264)
- ERNonUpdateableTable = ErrorCode(1288)
- ERFeatureDisabled = ErrorCode(1289)
- EROptionPreventsStatement = ErrorCode(1290)
- ERDuplicatedValueInType = ErrorCode(1291)
- ERSPDoesNotExist = ErrorCode(1305)
- ERNoDefaultForField = ErrorCode(1364)
- ErSPNotVarArg = ErrorCode(1414)
- ERRowIsReferenced2 = ErrorCode(1451)
- ErNoReferencedRow2 = ErrorCode(1452)
- ERDupIndex = ErrorCode(1831)
- ERInnodbReadOnly = ErrorCode(1874)
-
- // already exists
- ERDbCreateExists = ErrorCode(1007)
- ERTableExists = ErrorCode(1050)
- ERDupEntry = ErrorCode(1062)
- ERFileExists = ErrorCode(1086)
- ERUDFExists = ErrorCode(1125)
-
- // aborted
- ERGotSignal = ErrorCode(1078)
- ERForcingClose = ErrorCode(1080)
- ERAbortingConnection = ErrorCode(1152)
- ERLockDeadlock = ErrorCode(1213)
-
- // invalid arg
- ERUnknownComError = ErrorCode(1047)
- ERBadNullError = ErrorCode(1048)
- ERBadDb = ErrorCode(1049)
- ERBadTable = ErrorCode(1051)
- ERNonUniq = ErrorCode(1052)
- ERWrongFieldWithGroup = ErrorCode(1055)
- ERWrongGroupField = ErrorCode(1056)
- ERWrongSumSelect = ErrorCode(1057)
- ERWrongValueCount = ErrorCode(1058)
- ERTooLongIdent = ErrorCode(1059)
- ERDupFieldName = ErrorCode(1060)
- ERDupKeyName = ErrorCode(1061)
- ERWrongFieldSpec = ErrorCode(1063)
- ERParseError = ErrorCode(1064)
- EREmptyQuery = ErrorCode(1065)
- ERNonUniqTable = ErrorCode(1066)
- ERInvalidDefault = ErrorCode(1067)
- ERMultiplePriKey = ErrorCode(1068)
- ERTooManyKeys = ErrorCode(1069)
- ERTooManyKeyParts = ErrorCode(1070)
- ERTooLongKey = ErrorCode(1071)
- ERKeyColumnDoesNotExist = ErrorCode(1072)
- ERBlobUsedAsKey = ErrorCode(1073)
- ERTooBigFieldLength = ErrorCode(1074)
- ERWrongAutoKey = ErrorCode(1075)
- ERWrongFieldTerminators = ErrorCode(1083)
- ERBlobsAndNoTerminated = ErrorCode(1084)
- ERTextFileNotReadable = ErrorCode(1085)
- ERWrongSubKey = ErrorCode(1089)
- ERCantRemoveAllFields = ErrorCode(1090)
- ERUpdateTableUsed = ErrorCode(1093)
- ERNoTablesUsed = ErrorCode(1096)
- ERTooBigSet = ErrorCode(1097)
- ERBlobCantHaveDefault = ErrorCode(1101)
- ERWrongDbName = ErrorCode(1102)
- ERWrongTableName = ErrorCode(1103)
- ERUnknownProcedure = ErrorCode(1106)
- ERWrongParamCountToProcedure = ErrorCode(1107)
- ERWrongParametersToProcedure = ErrorCode(1108)
- ERFieldSpecifiedTwice = ErrorCode(1110)
- ERInvalidGroupFuncUse = ErrorCode(1111)
- ERTableMustHaveColumns = ErrorCode(1113)
- ERUnknownCharacterSet = ErrorCode(1115)
- ERTooManyTables = ErrorCode(1116)
- ERTooManyFields = ErrorCode(1117)
- ERTooBigRowSize = ErrorCode(1118)
- ERWrongOuterJoin = ErrorCode(1120)
- ERNullColumnInIndex = ErrorCode(1121)
- ERFunctionNotDefined = ErrorCode(1128)
- ERWrongValueCountOnRow = ErrorCode(1136)
- ERInvalidUseOfNull = ErrorCode(1138)
- ERRegexpError = ErrorCode(1139)
- ERMixOfGroupFuncAndFields = ErrorCode(1140)
- ERIllegalGrantForTable = ErrorCode(1144)
- ERSyntaxError = ErrorCode(1149)
- ERWrongColumnName = ErrorCode(1166)
- ERWrongKeyColumn = ErrorCode(1167)
- ERBlobKeyWithoutLength = ErrorCode(1170)
- ERPrimaryCantHaveNull = ErrorCode(1171)
- ERTooManyRows = ErrorCode(1172)
- ERLockOrActiveTransaction = ErrorCode(1192)
- ERUnknownSystemVariable = ErrorCode(1193)
- ERSetConstantsOnly = ErrorCode(1204)
- ERWrongArguments = ErrorCode(1210)
- ERWrongUsage = ErrorCode(1221)
- ERWrongNumberOfColumnsInSelect = ErrorCode(1222)
- ERDupArgument = ErrorCode(1225)
- ERLocalVariable = ErrorCode(1228)
- ERGlobalVariable = ErrorCode(1229)
- ERWrongValueForVar = ErrorCode(1231)
- ERWrongTypeForVar = ErrorCode(1232)
- ERVarCantBeRead = ErrorCode(1233)
- ERCantUseOptionHere = ErrorCode(1234)
- ERIncorrectGlobalLocalVar = ErrorCode(1238)
- ERWrongFKDef = ErrorCode(1239)
- ERKeyRefDoNotMatchTableRef = ErrorCode(1240)
- ERCyclicReference = ErrorCode(1245)
- ERIllegalReference = ErrorCode(1247)
- ERDerivedMustHaveAlias = ErrorCode(1248)
- ERTableNameNotAllowedHere = ErrorCode(1250)
- ERCollationCharsetMismatch = ErrorCode(1253)
- ERWarnDataTruncated = ErrorCode(1265)
- ERCantAggregate2Collations = ErrorCode(1267)
- ERCantAggregate3Collations = ErrorCode(1270)
- ERCantAggregateNCollations = ErrorCode(1271)
- ERVariableIsNotStruct = ErrorCode(1272)
- ERUnknownCollation = ErrorCode(1273)
- ERWrongNameForIndex = ErrorCode(1280)
- ERWrongNameForCatalog = ErrorCode(1281)
- ERBadFTColumn = ErrorCode(1283)
- ERTruncatedWrongValue = ErrorCode(1292)
- ERTooMuchAutoTimestampCols = ErrorCode(1293)
- ERInvalidOnUpdate = ErrorCode(1294)
- ERUnknownTimeZone = ErrorCode(1298)
- ERInvalidCharacterString = ErrorCode(1300)
- ERQueryInterrupted = ErrorCode(1317)
- ERTruncatedWrongValueForField = ErrorCode(1366)
- ERIllegalValueForType = ErrorCode(1367)
- ERDataTooLong = ErrorCode(1406)
- ErrWrongValueForType = ErrorCode(1411)
- ERNoSuchUser = ErrorCode(1449)
- ERForbidSchemaChange = ErrorCode(1450)
- ERWrongValue = ErrorCode(1525)
- ERDataOutOfRange = ErrorCode(1690)
- ERInvalidJSONText = ErrorCode(3140)
- ERInvalidJSONTextInParams = ErrorCode(3141)
- ERInvalidJSONBinaryData = ErrorCode(3142)
- ERInvalidJSONCharset = ErrorCode(3144)
- ERInvalidCastToJSON = ErrorCode(3147)
- ERJSONValueTooBig = ErrorCode(3150)
- ERJSONDocumentTooDeep = ErrorCode(3157)
-
- // max execution time exceeded
- ERQueryTimeout = ErrorCode(3024)
-
- ErrCantCreateGeometryObject = ErrorCode(1416)
- ErrGISDataWrongEndianess = ErrorCode(3055)
- ErrNotImplementedForCartesianSRS = ErrorCode(3704)
- ErrNotImplementedForProjectedSRS = ErrorCode(3705)
- ErrNonPositiveRadius = ErrorCode(3706)
-
- // server not available
- ERServerIsntAvailable = ErrorCode(3168)
-)
-
-// Sql states for errors.
-// Originally found in include/mysql/sql_state.h
-const (
- // SSUnknownSqlstate is ER_SIGNAL_EXCEPTION in
- // include/mysql/sql_state.h, but:
- // const char *unknown_sqlstate= "HY000"
- // in client.c. So using that one.
- SSUnknownSQLState = "HY000"
-
- // SSNetError is network related error
- SSNetError = "08S01"
-
- // SSUnknownComError is ER_UNKNOWN_COM_ERROR
- SSUnknownComError = "08S01"
-
- // SSWrongNumberOfColumns is related to columns error
- SSWrongNumberOfColumns = "21000"
-
- // SSWrongValueCountOnRow is related to columns count mismatch error
- SSWrongValueCountOnRow = "21S01"
-
- // SSDataTooLong is ER_DATA_TOO_LONG
- SSDataTooLong = "22001"
-
- // SSDataOutOfRange is ER_DATA_OUT_OF_RANGE
- SSDataOutOfRange = "22003"
-
- // SSConstraintViolation is constraint violation
- SSConstraintViolation = "23000"
-
- // SSCantDoThisDuringAnTransaction is
- // ER_CANT_DO_THIS_DURING_AN_TRANSACTION
- SSCantDoThisDuringAnTransaction = "25000"
-
- // SSAccessDeniedError is ER_ACCESS_DENIED_ERROR
- SSAccessDeniedError = "28000"
-
- // SSNoDB is ER_NO_DB_ERROR
- SSNoDB = "3D000"
-
- // SSLockDeadlock is ER_LOCK_DEADLOCK
- SSLockDeadlock = "40001"
-
- // SSClientError is the state on client errors
- SSClientError = "42000"
-
- // SSDupFieldName is ER_DUP_FIELD_NAME
- SSDupFieldName = "42S21"
-
- // SSBadFieldError is ER_BAD_FIELD_ERROR
- SSBadFieldError = "42S22"
-
- // SSUnknownTable is ER_UNKNOWN_TABLE
- SSUnknownTable = "42S02"
-
- // SSQueryInterrupted is ER_QUERY_INTERRUPTED;
- SSQueryInterrupted = "70100"
-)
-
-// CharacterSetEncoding maps a charset name to a golang encoder.
-// golang does not support encoders for all MySQL charsets.
-// A charset not in this map is unsupported.
-// A trivial encoding (e.g. utf8) has a `nil` encoder
-var CharacterSetEncoding = map[string]encoding.Encoding{
- "cp850": charmap.CodePage850,
- "koi8r": charmap.KOI8R,
- "latin1": charmap.Windows1252,
- "latin2": charmap.ISO8859_2,
- "ascii": nil,
- "hebrew": charmap.ISO8859_8,
- "greek": charmap.ISO8859_7,
- "cp1250": charmap.Windows1250,
- "gbk": simplifiedchinese.GBK,
- "latin5": charmap.ISO8859_9,
- "utf8": nil,
- "utf8mb3": nil,
- "cp866": charmap.CodePage866,
- "cp852": charmap.CodePage852,
- "latin7": charmap.ISO8859_13,
- "utf8mb4": nil,
- "cp1251": charmap.Windows1251,
- "cp1256": charmap.Windows1256,
- "cp1257": charmap.Windows1257,
- "binary": nil,
-}
-
// IsNum returns true if a MySQL type is a numeric value.
// It is the same as IS_NUM defined in mysql.h.
func IsNum(typ uint8) bool {
@@ -672,128 +281,3 @@ func IsNum(typ uint8) bool {
typ == binlog.TypeYear ||
typ == binlog.TypeNewDecimal
}
-
-// IsConnErr returns true if the error is a connection error.
-func IsConnErr(err error) bool {
- if IsTooManyConnectionsErr(err) {
- return false
- }
- if sqlErr, ok := err.(*SQLError); ok {
- num := sqlErr.Number()
- return (num >= CRUnknownError && num <= CRNamedPipeStateError) || num == ERQueryInterrupted
- }
- return false
-}
-
-// IsConnLostDuringQuery returns true if the error is a CRServerLost error.
-// Happens most commonly when a query is killed MySQL server-side.
-func IsConnLostDuringQuery(err error) bool {
- if sqlErr, ok := err.(*SQLError); ok {
- num := sqlErr.Number()
- return (num == CRServerLost)
- }
- return false
-}
-
-// IsEphemeralError returns true if the error is ephemeral and the caller should
-// retry if possible. Note: non-SQL errors are always treated as ephemeral.
-func IsEphemeralError(err error) bool {
- if sqlErr, ok := err.(*SQLError); ok {
- en := sqlErr.Number()
- switch en {
- case
- CRConnectionError,
- CRConnHostError,
- CRMalformedPacket,
- CRNamedPipeStateError,
- CRServerHandshakeErr,
- CRServerGone,
- CRServerLost,
- CRSSLConnectionError,
- CRUnknownError,
- CRUnknownHost,
- ERCantCreateThread,
- ERDiskFull,
- ERForcingClose,
- ERGotSignal,
- ERHostIsBlocked,
- ERLockTableFull,
- ERInnodbReadOnly,
- ERInternalError,
- ERLockDeadlock,
- ERLockWaitTimeout,
- ERQueryTimeout,
- EROutOfMemory,
- EROutOfResources,
- EROutOfSortMemory,
- ERQueryInterrupted,
- ERServerIsntAvailable,
- ERServerShutdown,
- ERTooManyUserConnections,
- ERUnknownError,
- ERUserLimitReached:
- return true
- default:
- return false
- }
- }
- // If it's not an sqlError then we assume it's ephemeral
- return true
-}
-
-// IsTooManyConnectionsErr returns true if the error is due to too many connections.
-func IsTooManyConnectionsErr(err error) bool {
- if sqlErr, ok := err.(*SQLError); ok {
- if sqlErr.Number() == CRServerHandshakeErr && strings.Contains(sqlErr.Message, "Too many connections") {
- return true
- }
- }
- return false
-}
-
-// IsSchemaApplyError returns true when given error is a MySQL error applying schema change
-func IsSchemaApplyError(err error) bool {
- merr, isSQLErr := err.(*SQLError)
- if !isSQLErr {
- return false
- }
- switch merr.Num {
- case
- ERDupKeyName,
- ERCantDropFieldOrKey,
- ERTableExists,
- ERDupFieldName:
- return true
- }
- return false
-}
-
-type ReplicationState int32
-
-const (
- ReplicationStateUnknown ReplicationState = iota
- ReplicationStateStopped
- ReplicationStateConnecting
- ReplicationStateRunning
-)
-
-// ReplicationStatusToState converts a value you have for the IO thread(s) or SQL
-// thread(s) or Group Replication applier thread(s) from MySQL or intermediate
-// layers to a mysql.ReplicationState.
-// on,yes,true == ReplicationStateRunning
-// off,no,false == ReplicationStateStopped
-// connecting == ReplicationStateConnecting
-// anything else == ReplicationStateUnknown
-func ReplicationStatusToState(s string) ReplicationState {
- // Group Replication uses ON instead of Yes
- switch strings.ToLower(s) {
- case "yes", "on", "true":
- return ReplicationStateRunning
- case "no", "off", "false":
- return ReplicationStateStopped
- case "connecting":
- return ReplicationStateConnecting
- default:
- return ReplicationStateUnknown
- }
-}
diff --git a/go/mysql/constants_test.go b/go/mysql/constants_test.go
index 34d8c09ca54..1a54aad4c02 100644
--- a/go/mysql/constants_test.go
+++ b/go/mysql/constants_test.go
@@ -21,6 +21,8 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+
+ "vitess.io/vitess/go/mysql/sqlerror"
)
func TestIsConnErr(t *testing.T) {
@@ -31,23 +33,23 @@ func TestIsConnErr(t *testing.T) {
in: errors.New("t"),
want: false,
}, {
- in: NewSQLError(5, "", ""),
+ in: sqlerror.NewSQLError(5, "", ""),
want: false,
}, {
- in: NewSQLError(CRServerGone, "", ""),
+ in: sqlerror.NewSQLError(sqlerror.CRServerGone, "", ""),
want: true,
}, {
- in: NewSQLError(CRServerLost, "", ""),
+ in: sqlerror.NewSQLError(sqlerror.CRServerLost, "", ""),
want: true,
}, {
- in: NewSQLError(ERQueryInterrupted, "", ""),
+ in: sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, "", ""),
want: true,
}, {
- in: NewSQLError(CRCantReadCharset, "", ""),
+ in: sqlerror.NewSQLError(sqlerror.CRCantReadCharset, "", ""),
want: false,
}}
for _, tcase := range testcases {
- got := IsConnErr(tcase.in)
+ got := sqlerror.IsConnErr(tcase.in)
assert.Equal(t, tcase.want, got, "IsConnErr(%#v): %v, want %v", tcase.in, got, tcase.want)
}
@@ -61,23 +63,23 @@ func TestIsConnLostDuringQuery(t *testing.T) {
in: errors.New("t"),
want: false,
}, {
- in: NewSQLError(5, "", ""),
+ in: sqlerror.NewSQLError(5, "", ""),
want: false,
}, {
- in: NewSQLError(CRServerGone, "", ""),
+ in: sqlerror.NewSQLError(sqlerror.CRServerGone, "", ""),
want: false,
}, {
- in: NewSQLError(CRServerLost, "", ""),
+ in: sqlerror.NewSQLError(sqlerror.CRServerLost, "", ""),
want: true,
}, {
- in: NewSQLError(ERQueryInterrupted, "", ""),
+ in: sqlerror.NewSQLError(sqlerror.ERQueryInterrupted, "", ""),
want: false,
}, {
- in: NewSQLError(CRCantReadCharset, "", ""),
+ in: sqlerror.NewSQLError(sqlerror.CRCantReadCharset, "", ""),
want: false,
}}
for _, tcase := range testcases {
- got := IsConnLostDuringQuery(tcase.in)
+ got := sqlerror.IsConnLostDuringQuery(tcase.in)
assert.Equal(t, tcase.want, got, "IsConnLostDuringQuery(%#v): %v, want %v", tcase.in, got, tcase.want)
}
diff --git a/go/mysql/datetime/types.go b/go/mysql/datetime/datetime.go
similarity index 70%
rename from go/mysql/datetime/types.go
rename to go/mysql/datetime/datetime.go
index 559b44d2d6a..cc1fc92e091 100644
--- a/go/mysql/datetime/types.go
+++ b/go/mysql/datetime/datetime.go
@@ -17,6 +17,7 @@ limitations under the License.
package datetime
import (
+ "encoding/binary"
"time"
"vitess.io/vitess/go/mysql/decimal"
@@ -94,7 +95,7 @@ func (t Time) FormatDecimal() decimal.Decimal {
}
func (t Time) ToDateTime() (out DateTime) {
- return FromStdTime(t.ToStdTime(time.Local))
+ return NewDateTimeFromStd(t.ToStdTime(time.Local))
}
func (t Time) IsZero() bool {
@@ -243,12 +244,12 @@ func (d Date) Hash(h *vthash.Hasher) {
h.Write8(d.day)
}
-func (dt Date) Weekday() time.Weekday {
- return dt.ToStdTime(time.Local).Weekday()
+func (d Date) Weekday() time.Weekday {
+ return d.ToStdTime(time.Local).Weekday()
}
-func (dt Date) Yearday() int {
- return dt.ToStdTime(time.Local).YearDay()
+func (d Date) Yearday() int {
+ return d.ToStdTime(time.Local).YearDay()
}
func (d Date) ISOWeek() (int, int) {
@@ -405,7 +406,19 @@ func (t Time) ToDuration() time.Duration {
}
func (t Time) toStdTime(year int, month time.Month, day int, loc *time.Location) (out time.Time) {
- return time.Date(year, month, day, 0, 0, 0, 0, loc).Add(t.ToDuration())
+ hours := t.Hour()
+ minutes := t.Minute()
+ secs := t.Second()
+ nsecs := t.Nanosecond()
+
+ if t.Neg() {
+ hours = -hours
+ minutes = -minutes
+ secs = -secs
+ nsecs = -nsecs
+ }
+
+ return time.Date(year, month, day, hours, minutes, secs, nsecs, loc)
}
func (t Time) ToStdTime(loc *time.Location) (out time.Time) {
@@ -413,6 +426,20 @@ func (t Time) ToStdTime(loc *time.Location) (out time.Time) {
return t.toStdTime(year, month, day, loc)
}
+func (t Time) AddInterval(itv *Interval, stradd bool) (Time, uint8, bool) {
+ dt := DateTime{Time: t}
+ ok := dt.addInterval(itv)
+ return dt.Time, itv.precision(stradd), ok
+}
+
+func (t Time) toSeconds() int {
+ tsecs := t.Hour()*secondsPerHour + t.Minute()*secondsPerMinute + t.Second()
+ if t.Neg() {
+ return -tsecs
+ }
+ return tsecs
+}
+
func (d Date) ToStdTime(loc *time.Location) (out time.Time) {
return time.Date(d.Year(), time.Month(d.Month()), d.Day(), 0, 0, 0, 0, loc)
}
@@ -471,6 +498,12 @@ func (d Date) Compare(d2 Date) int {
return 0
}
+func (d Date) AddInterval(itv *Interval) (Date, bool) {
+ dt := DateTime{Date: d}
+ ok := dt.addInterval(itv)
+ return dt.Date, ok
+}
+
func (dt DateTime) FormatInt64() int64 {
d := dt.Round(0)
return d.Date.FormatInt64()*1000000 + d.Time.FormatInt64()
@@ -493,7 +526,7 @@ func (dt DateTime) Compare(dt2 DateTime) int {
case zerodate1 || zerodate2:
// if we're comparing a time to a datetime, we need to normalize them
// both into datetimes; this normalization is not trivial because negative
- // times result in a date change, to let the standard library handle this
+ // times result in a date change, so let the standard library handle this
return dt.ToStdTime(time.Local).Compare(dt2.ToStdTime(time.Local))
}
if cmp := dt.Date.Compare(dt2.Date); cmp != 0 {
@@ -502,6 +535,11 @@ func (dt DateTime) Compare(dt2 DateTime) int {
return dt.Time.Compare(dt2.Time)
}
+func (dt DateTime) AddInterval(itv *Interval, stradd bool) (DateTime, uint8, bool) {
+ ok := dt.addInterval(itv)
+ return dt, itv.precision(stradd), ok
+}
+
func (dt DateTime) Round(p int) (r DateTime) {
if dt.Time.nanosecond == 0 {
return dt
@@ -521,28 +559,138 @@ func (dt DateTime) Round(p int) (r DateTime) {
r = dt
if n == 1e9 {
r.Time.nanosecond = 0
- return FromStdTime(r.ToStdTime(time.Local).Add(time.Second))
+ return NewDateTimeFromStd(r.ToStdTime(time.Local).Add(time.Second))
}
r.Time.nanosecond = uint32(n)
return r
}
-func FromStdTime(t time.Time) DateTime {
+func (dt DateTime) toSeconds() int {
+ return (dt.Date.Day()-1)*secondsPerDay + dt.Time.toSeconds()
+}
+
+func (dt *DateTime) addInterval(itv *Interval) bool {
+ switch {
+ case itv.unit.HasTimeParts():
+ if !itv.inRange() {
+ return false
+ }
+
+ nsec := dt.Time.Nanosecond() + itv.nsec
+ sec := dt.toSeconds() + itv.toSeconds() + (nsec / int(time.Second))
+ nsec = nsec % int(time.Second)
+
+ if nsec < 0 {
+ nsec += int(time.Second)
+ sec--
+ }
+
+ days := sec / secondsPerDay
+ sec -= days * secondsPerDay
+
+ if sec < 0 {
+ sec += secondsPerDay
+ days--
+ }
+
+ dt.Time.nanosecond = uint32(nsec)
+ dt.Time.second = uint8(sec % secondsPerMinute)
+ dt.Time.minute = uint8((sec / secondsPerMinute) % secondsPerMinute)
+ dt.Time.hour = uint16(sec / secondsPerHour)
+
+ daynum := mysqlDayNumber(dt.Date.Year(), dt.Date.Month(), 1) + days
+ if daynum < 0 || daynum > maxDay {
+ return false
+ }
+
+ dt.Date.year, dt.Date.month, dt.Date.day = mysqlDateFromDayNumber(daynum)
+ return true
+
+ case itv.unit.HasDayParts():
+ daynum := mysqlDayNumber(dt.Date.Year(), dt.Date.Month(), dt.Date.Day())
+ daynum += itv.day
+ dt.Date.year, dt.Date.month, dt.Date.day = mysqlDateFromDayNumber(daynum)
+ return true
+
+ case itv.unit.HasMonthParts():
+ months := dt.Date.Year()*12 + itv.year*12 + (dt.Date.Month() - 1) + itv.month
+ if months < 0 || months >= 120000 {
+ return false
+ }
+
+ year := months / 12
+ month := (months % 12) + 1
+
+ dt.Date.year = uint16(year)
+ dt.Date.month = uint8(month)
+
+ // MySQL quirk: if the original date was in a day that the new month
+ // doesn't have, the date is offset backwards to the last day of
+ // the new month. This is the opposite to normal date handling where
+ // we'd offset days into the next month.
+ if dim := daysIn(time.Month(month), year); dt.Date.Day() > dim {
+ dt.Date.day = uint8(dim)
+ }
+ return true
+
+ case itv.unit == IntervalYear:
+ if itv.year > 10000 {
+ return false
+ }
+
+ year := dt.Date.Year() + itv.year
+ dt.Date.year = uint16(year)
+
+ // MySQL quirk: if the original date was Feb 29th on a leap year, and
+ // the resulting year is not a leap year, the date is offset backwards.
+ // This is the opposite to what normal date handling does.
+ if dt.Date.Month() == 2 && dt.Date.Day() == 29 && !isLeap(year) {
+ dt.Date.day = 28
+ }
+ return true
+
+ default:
+ panic("unexpected IntervalType")
+ }
+}
+
+func (dt DateTime) WeightString(dst []byte) []byte {
+ // This logic does the inverse of what we do in the binlog parser for the datetime2 type.
+ year, month, day := dt.Date.Year(), dt.Date.Month(), dt.Date.Day()
+ ymd := uint64(year*13+month)<<5 | uint64(day)
+ hms := uint64(dt.Time.Hour())<<12 | uint64(dt.Time.Minute())<<6 | uint64(dt.Time.Second())
+ raw := (ymd<<17|hms)<<24 + uint64(dt.Time.Nanosecond()/1000)
+ if dt.Time.Neg() {
+ raw = -raw
+ }
+
+ raw = raw ^ (1 << 63)
+ return binary.BigEndian.AppendUint64(dst, raw)
+}
+
+func NewDateFromStd(t time.Time) Date {
year, month, day := t.Date()
+ return Date{
+ year: uint16(year),
+ month: uint8(month),
+ day: uint8(day),
+ }
+}
+
+func NewTimeFromStd(t time.Time) Time {
hour, min, sec := t.Clock()
nsec := t.Nanosecond()
+ return Time{
+ hour: uint16(hour),
+ minute: uint8(min),
+ second: uint8(sec),
+ nanosecond: uint32(nsec),
+ }
+}
+func NewDateTimeFromStd(t time.Time) DateTime {
return DateTime{
- Date: Date{
- year: uint16(year),
- month: uint8(month),
- day: uint8(day),
- },
- Time: Time{
- hour: uint16(hour),
- minute: uint8(min),
- second: uint8(sec),
- nanosecond: uint32(nsec),
- },
+ Date: NewDateFromStd(t),
+ Time: NewTimeFromStd(t),
}
}
diff --git a/go/mysql/datetime/helpers.go b/go/mysql/datetime/helpers.go
index 8b53a376d7c..33d673782fc 100644
--- a/go/mysql/datetime/helpers.go
+++ b/go/mysql/datetime/helpers.go
@@ -198,7 +198,7 @@ func getnuml(s string, l int) (int, string, bool) {
}
func getnumn(s string) (int, string, bool) {
- if !isDigit(s, 0) {
+ if len(s) == 0 || !('0' <= s[0] && s[0] <= '9') {
return 0, s, false
}
@@ -229,6 +229,14 @@ var daysBefore = [...]int32{
31 + 28 + 31 + 30 + 31 + 30 + 31 + 31 + 30 + 31 + 30 + 31,
}
+var daysInMonth = [...]int{
+ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31,
+}
+
+var daysInMonthLeap = [...]int{
+ 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31,
+}
+
func daysIn(m time.Month, year int) int {
if m == time.February && isLeap(year) {
return 29
@@ -240,6 +248,13 @@ func isLeap(year int) bool {
return year%4 == 0 && (year%100 != 0 || year%400 == 0)
}
+func daysInYear(year int) int {
+ if isLeap(year) {
+ return 366
+ }
+ return 365
+}
+
func parseNanoseconds[bytes []byte | string](value bytes, nbytes int) (ns int, l int, ok bool) {
if value[0] != '.' {
return 0, 0, false
@@ -268,3 +283,9 @@ func parseNanoseconds[bytes []byte | string](value bytes, nbytes int) (ns int, l
return
}
+
+const (
+ secondsPerMinute = 60
+ secondsPerHour = 60 * secondsPerMinute
+ secondsPerDay = 24 * secondsPerHour
+)
diff --git a/go/mysql/datetime/interval.go b/go/mysql/datetime/interval.go
new file mode 100644
index 00000000000..21395f2174d
--- /dev/null
+++ b/go/mysql/datetime/interval.go
@@ -0,0 +1,425 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package datetime
+
+import (
+ "math"
+ "math/bits"
+ "strconv"
+ "strings"
+
+ "vitess.io/vitess/go/hack"
+ "vitess.io/vitess/go/mysql/decimal"
+)
+
+// IntervalType represents the temporal elements contained in an Interval.
+// Intervals in MySQL can contain more than one temporal element. We define their types as
+// a bitset to let us efficiently query the temporal elements that form each interval.
+// There are two kinds of IntervalTypes: unary and compound. Unary interval types contain
+// a single temporal element (e.g. SECONDS, or DAYS) and hence contain only one bit set.
+// Compount interval types are the logical combination of several unary interval types.
+type IntervalType uint8
+
+// IntervalType constants.
+const (
+ // Unary interval types
+ IntervalNone IntervalType = 0
+ IntervalMicrosecond IntervalType = 1 << 0
+ IntervalSecond IntervalType = 1 << 1
+ IntervalMinute IntervalType = 1 << 2
+ IntervalHour IntervalType = 1 << 3
+ IntervalDay IntervalType = 1 << 4
+ IntervalMonth IntervalType = 1 << 5
+ IntervalYear IntervalType = 1 << 6
+ intervalMulti IntervalType = 1 << 7
+
+ // IntervalWeek and IntervalQuarter are an exception for unary interval types,
+ // which are not unique temporal elements but instead a modifier on a unary element
+ // - WEEK is just a count of DAYS multiplied by 7
+ // - QUARTER is just a count of MONTHS multiplied by 3
+ IntervalWeek = IntervalDay | intervalMulti
+ IntervalQuarter = IntervalMonth | intervalMulti
+
+ // Compound interval types
+ IntervalSecondMicrosecond = IntervalSecond | IntervalMicrosecond
+ IntervalMinuteMicrosecond = IntervalMinute | IntervalSecond | IntervalMicrosecond
+ IntervalMinuteSecond = IntervalMinute | IntervalSecond
+ IntervalHourMicrosecond = IntervalHour | IntervalMinute | IntervalSecond | IntervalMicrosecond
+ IntervalHourSecond = IntervalHour | IntervalMinute | IntervalSecond
+ IntervalHourMinute = IntervalHour | IntervalMinute
+ IntervalDayMicrosecond = IntervalDay | IntervalHour | IntervalMinute | IntervalSecond | IntervalMicrosecond
+ IntervalDaySecond = IntervalDay | IntervalHour | IntervalMinute | IntervalSecond
+ IntervalDayMinute = IntervalDay | IntervalHour | IntervalMinute
+ IntervalDayHour = IntervalDay | IntervalHour
+ IntervalYearMonth = IntervalYear | IntervalMonth
+)
+
+type intervalSetter func(tp *Interval, val int)
+
+var intervalSet = [...]intervalSetter{
+ intervalSetMicrosecond,
+ intervalSetSecond,
+ intervalSetMinute,
+ intervalSetHour,
+ intervalSetDay,
+ intervalSetMonth,
+ intervalSetYear,
+}
+
+// setter returns the setter method for this interval's type.
+// If this is a unary interval, it'll return the setter for the interval's unary type.
+// If this is a compound interval, it'll return the setter for the smallest unary type
+// in the interval.
+func (itv IntervalType) setter() intervalSetter {
+ // find the lowest bit set in the interval, this is the smallest unary type
+ unary := itv & -itv
+
+ // map from an unary interval type to its offset by counting the trailing
+ // zeroes. e.g. for HOUR(1 << 3), this will return 3, which the position
+ // for the HOUR setter in intervalSet
+ return intervalSet[bits.TrailingZeros8(uint8(unary))]
+}
+
+func (itv IntervalType) PartCount() int {
+ return bits.OnesCount8(uint8(itv & ^intervalMulti))
+}
+
+func (itv IntervalType) HasTimeParts() bool {
+ return itv&(IntervalHour|IntervalMinute|IntervalSecond|IntervalMicrosecond) != 0
+}
+
+func (itv IntervalType) HasDateParts() bool {
+ return itv&(IntervalYear|IntervalMonth|IntervalDay) != 0
+}
+
+func (itv IntervalType) HasDayParts() bool {
+ return (itv & IntervalDay) != 0
+}
+
+func (itv IntervalType) HasMonthParts() bool {
+ return (itv & IntervalMonth) != 0
+}
+
+func (itv IntervalType) NeedsPrecision() bool {
+ return itv&IntervalMicrosecond != 0
+}
+
+// ToString returns the type as a string
+func (itv IntervalType) ToString() string {
+ switch itv {
+ case IntervalYear:
+ return "year"
+ case IntervalQuarter:
+ return "quarter"
+ case IntervalMonth:
+ return "month"
+ case IntervalWeek:
+ return "week"
+ case IntervalDay:
+ return "day"
+ case IntervalHour:
+ return "hour"
+ case IntervalMinute:
+ return "minute"
+ case IntervalSecond:
+ return "second"
+ case IntervalMicrosecond:
+ return "microsecond"
+ case IntervalYearMonth:
+ return "year_month"
+ case IntervalDayHour:
+ return "day_hour"
+ case IntervalDayMinute:
+ return "day_minute"
+ case IntervalDaySecond:
+ return "day_second"
+ case IntervalHourMinute:
+ return "hour_minute"
+ case IntervalHourSecond:
+ return "hour_second"
+ case IntervalMinuteSecond:
+ return "minute_second"
+ case IntervalDayMicrosecond:
+ return "day_microsecond"
+ case IntervalHourMicrosecond:
+ return "hour_microsecond"
+ case IntervalMinuteMicrosecond:
+ return "minute_microsecond"
+ case IntervalSecondMicrosecond:
+ return "second_microsecond"
+ default:
+ return "[unknown IntervalType]"
+ }
+}
+
+func intervalSetYear(tp *Interval, val int) {
+ tp.year = val
+}
+
+func intervalSetMonth(tp *Interval, val int) {
+ // if the intervalMulti flag is set, this interval expects QUARTERS instead of months
+ if tp.unit&intervalMulti != 0 {
+ val = val * 3
+ }
+ tp.month = val
+}
+
+func intervalSetDay(tp *Interval, val int) {
+ // if the intervalMulti flag is set, this interval expects WEEKS instead of days
+ if tp.unit&intervalMulti != 0 {
+ val = val * 7
+ }
+ tp.day = val
+}
+
+func intervalSetHour(tp *Interval, val int) {
+ tp.hour = val
+}
+
+func intervalSetMinute(tp *Interval, val int) {
+ tp.min = val
+}
+
+func intervalSetSecond(tp *Interval, val int) {
+ tp.sec = val
+}
+
+func intervalSetMicrosecond(tp *Interval, val int) {
+ // if we are setting the Microseconds in this interval, but the
+ // interval's type isn't explicitly MICROSECOND (i.e. it's an interval
+ // with several values besides MICROSECOND), the value being passed
+ // here won't be a fixed number of microseconds, but a fractional part.
+ // We need to scale it into microseconds.
+ // E.g. when parsing a SECOND:MICROSECOND value of '1.5', the input
+ // to this setter will be 5, but the interval doesn't contain 5 microseconds,
+ // it contains 500000. We perform the scaling into 6 digits using base10 log.
+ if tp.unit != IntervalMicrosecond {
+ digits := int(math.Log10(float64(val)) + 1)
+ val = val * int(math.Pow10(6-digits))
+ }
+ // we store nsec internally, so convert from microseconds to nanoseconds
+ tp.nsec = val * 1000
+}
+
+// parseIntervalFields parses a internal string into separate numeric fields.
+// The parsing is extremely lax according to MySQL. Any contiguous run of numbers
+// is considered a field, and any non-numeric character is ignored.
+func parseIntervalFields(itv string, negate *bool) (fields []int) {
+ if len(itv) > 0 && itv[0] == '-' {
+ *negate = !*negate
+ itv = itv[1:]
+ }
+
+ for {
+ for len(itv) > 0 && !('0' <= itv[0] && itv[0] <= '9') {
+ itv = itv[1:]
+ }
+ if len(itv) == 0 {
+ break
+ }
+
+ var n int
+ for len(itv) > 0 && '0' <= itv[0] && itv[0] <= '9' {
+ n = n*10 + int(itv[0]-'0')
+ itv = itv[1:]
+ }
+
+ fields = append(fields, n)
+ }
+ return
+}
+
+type Interval struct {
+ timeparts
+ unit IntervalType
+}
+
+func (itv *Interval) Unit() IntervalType {
+ return itv.unit
+}
+
+const maxDay = 3652424
+
+func (itv *Interval) inRange() bool {
+ if itv.day > maxDay {
+ return false
+ }
+ if itv.hour > maxDay*24 {
+ return false
+ }
+ if itv.min > maxDay*24*60 {
+ return false
+ }
+ if itv.sec > maxDay*24*60*60 {
+ return false
+ }
+ return true
+}
+
+// setFromFields sets the duration of interval from a slice of fields and
+// the given interval type.
+// This follow's MySQL's behavior: if there are fewer fields than the ones
+// we'd expect to see in the interval's type, we pick the RIGHTMOST as
+// the values for the interval.
+// E.g. if our interval type wants HOUR:MINUTE:SECOND and we have [1, 1]
+// as input fields, the resulting interval is '1min1sec'
+func (itv *Interval) setFromFields(fields []int, unit IntervalType) bool {
+ parts := unit.PartCount()
+ if parts == 1 {
+ unit.setter()(itv, fields[0])
+ return true
+ }
+ if len(fields) > 3 && parts < 4 {
+ return false
+ }
+
+ for f, set := range intervalSet {
+ if len(fields) == 0 {
+ break
+ }
+ if unit&(1<= 3652500 {
+ return 0, 0, 0
+ }
+
+ year := daynr * 100 / 36525
+ leapAdjust := (((year-1)/100 + 1) * 3) / 4
+ yday := (daynr - year*365) - (year-1)/4 + leapAdjust
+
+ if diy := daysInYear(year); yday > diy {
+ yday -= diy
+ year++
+ }
+
+ daycount := daysInMonth
+ if isLeap(year) {
+ daycount = daysInMonthLeap
+ }
+ for month, dim := range daycount {
+ if yday <= dim {
+ return uint16(year), uint8(month + 1), uint8(yday)
+ }
+ yday -= dim
+ }
+
+ panic("unreachable: yday is too large?")
+}
diff --git a/go/mysql/datetime/mydate_test.go b/go/mysql/datetime/mydate_test.go
new file mode 100644
index 00000000000..29ecd2df9d2
--- /dev/null
+++ b/go/mysql/datetime/mydate_test.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package datetime
+
+import (
+ "encoding/json"
+ "os"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestDayNumber(t *testing.T) {
+ td, err := os.Open("testdata/year_to_daynr.json")
+ require.NoError(t, err)
+ defer td.Close()
+
+ var expected []int
+ err = json.NewDecoder(td).Decode(&expected)
+ require.NoError(t, err)
+
+ for year, daynr := range expected {
+ assert.Equal(t, daynr, mysqlDayNumber(year, 1, 1))
+ }
+}
+
+func TestDayNumberFields(t *testing.T) {
+ td, err := os.Open("testdata/daynr_to_date.json")
+ require.NoError(t, err)
+ defer td.Close()
+
+ var expected [][4]int
+ err = json.NewDecoder(td).Decode(&expected)
+ require.NoError(t, err)
+
+ for _, tc := range expected {
+ y, m, d := mysqlDateFromDayNumber(tc[0])
+ assert.Equal(t, tc[1], int(y))
+ assert.Equal(t, tc[2], int(m))
+ assert.Equal(t, tc[3], int(d))
+
+ assert.Equalf(t, tc[0], mysqlDayNumber(tc[1], tc[2], tc[3]), "date %d-%d-%d", tc[1], tc[2], tc[3])
+ }
+}
diff --git a/go/mysql/datetime/parse.go b/go/mysql/datetime/parse.go
index 1d94a9ba8a5..e8f17191f4c 100644
--- a/go/mysql/datetime/parse.go
+++ b/go/mysql/datetime/parse.go
@@ -321,7 +321,7 @@ func ParseDateTimeInt64(i int64) (dt DateTime, ok bool) {
if i == 0 {
return dt, true
}
- if t == 0 || d == 0 {
+ if d == 0 {
return dt, false
}
dt.Time, ok = ParseTimeInt64(t)
@@ -399,5 +399,10 @@ func ParseTimeDecimal(d decimal.Decimal, l int32, prec int) (Time, int, bool) {
} else {
t = t.Round(prec)
}
+ // We only support a maximum of nanosecond precision,
+ // so if the decimal has any larger precision we truncate it.
+ if prec > 9 {
+ prec = 9
+ }
return t, prec, ok
}
diff --git a/go/mysql/datetime/parse_test.go b/go/mysql/datetime/parse_test.go
index 6b5b489d167..6ed342edfb3 100644
--- a/go/mysql/datetime/parse_test.go
+++ b/go/mysql/datetime/parse_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package datetime
import (
+ "fmt"
"testing"
"github.com/stretchr/testify/assert"
@@ -235,6 +236,7 @@ func TestParseDateTime(t *testing.T) {
{input: "20221012111213.123456", output: datetime{2022, 10, 12, 11, 12, 13, 123456000}, l: 6},
{input: "221012111213.123456", output: datetime{2022, 10, 12, 11, 12, 13, 123456000}, l: 6},
{input: "2022101211121321321312", output: datetime{2022, 10, 12, 11, 12, 13, 0}, err: true},
+ {input: "3284004416225113510", output: datetime{}, err: true},
{input: "2012-12-31 11:30:45", output: datetime{2012, 12, 31, 11, 30, 45, 0}},
{input: "2012^12^31 11+30+45", output: datetime{2012, 12, 31, 11, 30, 45, 0}},
{input: "2012/12/31 11*30*45", output: datetime{2012, 12, 31, 11, 30, 45, 0}},
@@ -290,3 +292,53 @@ func TestParseDateTime(t *testing.T) {
})
}
}
+
+func TestParseDateTimeInt64(t *testing.T) {
+ type datetime struct {
+ year int
+ month int
+ day int
+ hour int
+ minute int
+ second int
+ nanosecond int
+ }
+ tests := []struct {
+ input int64
+ output datetime
+ l int
+ err bool
+ }{
+ {input: 1, output: datetime{}, err: true},
+ {input: 20221012000000, output: datetime{2022, 10, 12, 0, 0, 0, 0}},
+ {input: 20221012112233, output: datetime{2022, 10, 12, 11, 22, 33, 0}},
+ }
+
+ for _, test := range tests {
+ t.Run(fmt.Sprintf("%d", test.input), func(t *testing.T) {
+ got, ok := ParseDateTimeInt64(test.input)
+ if test.err {
+ if !got.IsZero() {
+ assert.Equal(t, test.output.year, got.Date.Year())
+ assert.Equal(t, test.output.month, got.Date.Month())
+ assert.Equal(t, test.output.day, got.Date.Day())
+ assert.Equal(t, test.output.hour, got.Time.Hour())
+ assert.Equal(t, test.output.minute, got.Time.Minute())
+ assert.Equal(t, test.output.second, got.Time.Second())
+ assert.Equal(t, test.output.nanosecond, got.Time.Nanosecond())
+ }
+ assert.Falsef(t, ok, "did not fail to parse %s", test.input)
+ return
+ }
+
+ require.True(t, ok)
+ assert.Equal(t, test.output.year, got.Date.Year())
+ assert.Equal(t, test.output.month, got.Date.Month())
+ assert.Equal(t, test.output.day, got.Date.Day())
+ assert.Equal(t, test.output.hour, got.Time.Hour())
+ assert.Equal(t, test.output.minute, got.Time.Minute())
+ assert.Equal(t, test.output.second, got.Time.Second())
+ assert.Equal(t, test.output.nanosecond, got.Time.Nanosecond())
+ })
+ }
+}
diff --git a/go/mysql/datetime/testdata/daynr_to_date.json b/go/mysql/datetime/testdata/daynr_to_date.json
new file mode 100644
index 00000000000..3bb175d97e2
--- /dev/null
+++ b/go/mysql/datetime/testdata/daynr_to_date.json
@@ -0,0 +1,8188 @@
+[[456, 1, 4, 1],
+[559, 1, 7, 13],
+[572, 1, 7, 26],
+[618, 1, 9, 10],
+[785, 2, 2, 24],
+[911, 2, 6, 30],
+[1067, 2, 12, 3],
+[1173, 3, 3, 19],
+[1214, 3, 4, 29],
+[1402, 3, 11, 3],
+[1518, 4, 2, 27],
+[1680, 4, 8, 7],
+[1706, 4, 9, 2],
+[1805, 4, 12, 10],
+[1829, 5, 1, 3],
+[1861, 5, 2, 4],
+[1891, 5, 3, 6],
+[1983, 5, 6, 6],
+[2043, 5, 8, 5],
+[2223, 6, 2, 1],
+[2260, 6, 3, 10],
+[2367, 6, 6, 25],
+[2449, 6, 9, 15],
+[2533, 6, 12, 8],
+[2682, 7, 5, 6],
+[2769, 7, 8, 1],
+[2881, 7, 11, 21],
+[2962, 8, 2, 10],
+[3014, 8, 4, 2],
+[3206, 8, 10, 11],
+[3253, 8, 11, 27],
+[3416, 9, 5, 9],
+[3422, 9, 5, 15],
+[3601, 9, 11, 10],
+[3637, 9, 12, 16],
+[3794, 10, 5, 22],
+[3885, 10, 8, 21],
+[3966, 10, 11, 10],
+[3978, 10, 11, 22],
+[4012, 10, 12, 26],
+[4087, 11, 3, 11],
+[4101, 11, 3, 25],
+[4138, 11, 5, 1],
+[4317, 11, 10, 27],
+[4421, 12, 2, 8],
+[4440, 12, 2, 27],
+[4450, 12, 3, 8],
+[4615, 12, 8, 20],
+[4671, 12, 10, 15],
+[4855, 13, 4, 17],
+[4879, 13, 5, 11],
+[5057, 13, 11, 5],
+[5257, 14, 5, 24],
+[5272, 14, 6, 8],
+[5313, 14, 7, 19],
+[5323, 14, 7, 29],
+[5409, 14, 10, 23],
+[5525, 15, 2, 16],
+[5656, 15, 6, 27],
+[5829, 15, 12, 17],
+[5914, 16, 3, 11],
+[6010, 16, 6, 15],
+[6104, 16, 9, 17],
+[6241, 17, 2, 1],
+[6303, 17, 4, 4],
+[6397, 17, 7, 7],
+[6516, 17, 11, 3],
+[6695, 18, 5, 1],
+[6833, 18, 9, 16],
+[6994, 19, 2, 24],
+[7061, 19, 5, 2],
+[7110, 19, 6, 20],
+[7229, 19, 10, 17],
+[7314, 20, 1, 10],
+[7374, 20, 3, 10],
+[7462, 20, 6, 6],
+[7555, 20, 9, 7],
+[7736, 21, 3, 7],
+[7862, 21, 7, 11],
+[7926, 21, 9, 13],
+[7938, 21, 9, 25],
+[8021, 21, 12, 17],
+[8087, 22, 2, 21],
+[8159, 22, 5, 4],
+[8218, 22, 7, 2],
+[8233, 22, 7, 17],
+[8337, 22, 10, 29],
+[8488, 23, 3, 29],
+[8612, 23, 7, 31],
+[8677, 23, 10, 4],
+[8705, 23, 11, 1],
+[8783, 24, 1, 18],
+[8815, 24, 2, 19],
+[8944, 24, 6, 27],
+[9077, 24, 11, 7],
+[9218, 25, 3, 28],
+[9376, 25, 9, 2],
+[9512, 26, 1, 16],
+[9628, 26, 5, 12],
+[9764, 26, 9, 25],
+[9862, 27, 1, 1],
+[10027, 27, 6, 15],
+[10161, 27, 10, 27],
+[10273, 28, 2, 16],
+[10373, 28, 5, 26],
+[10542, 28, 11, 11],
+[10700, 29, 4, 18],
+[10875, 29, 10, 10],
+[10995, 30, 2, 7],
+[11121, 30, 6, 13],
+[11157, 30, 7, 19],
+[11314, 30, 12, 23],
+[11498, 31, 6, 25],
+[11603, 31, 10, 8],
+[11779, 32, 4, 1],
+[11931, 32, 8, 31],
+[12026, 32, 12, 4],
+[12063, 33, 1, 10],
+[12127, 33, 3, 15],
+[12306, 33, 9, 10],
+[12337, 33, 10, 11],
+[12491, 34, 3, 14],
+[12657, 34, 8, 27],
+[12832, 35, 2, 18],
+[12877, 35, 4, 4],
+[13005, 35, 8, 10],
+[13038, 35, 9, 12],
+[13198, 36, 2, 19],
+[13346, 36, 7, 16],
+[13532, 37, 1, 18],
+[13701, 37, 7, 6],
+[13727, 37, 8, 1],
+[13829, 37, 11, 11],
+[13849, 37, 12, 1],
+[13969, 38, 3, 31],
+[14112, 38, 8, 21],
+[14173, 38, 10, 21],
+[14177, 38, 10, 25],
+[14363, 39, 4, 29],
+[14513, 39, 9, 26],
+[14678, 40, 3, 9],
+[14846, 40, 8, 24],
+[15036, 41, 3, 2],
+[15159, 41, 7, 3],
+[15266, 41, 10, 18],
+[15450, 42, 4, 20],
+[15618, 42, 10, 5],
+[15683, 42, 12, 9],
+[15754, 43, 2, 18],
+[15883, 43, 6, 27],
+[16058, 43, 12, 19],
+[16082, 44, 1, 12],
+[16198, 44, 5, 7],
+[16375, 44, 10, 31],
+[16467, 45, 1, 31],
+[16486, 45, 2, 19],
+[16511, 45, 3, 16],
+[16642, 45, 7, 25],
+[16751, 45, 11, 11],
+[16949, 46, 5, 28],
+[17077, 46, 10, 3],
+[17116, 46, 11, 11],
+[17206, 47, 2, 9],
+[17388, 47, 8, 10],
+[17562, 48, 1, 31],
+[17741, 48, 7, 28],
+[17873, 48, 12, 7],
+[17963, 49, 3, 7],
+[17983, 49, 3, 27],
+[18069, 49, 6, 21],
+[18144, 49, 9, 4],
+[18343, 50, 3, 22],
+[18387, 50, 5, 5],
+[18519, 50, 9, 14],
+[18645, 51, 1, 18],
+[18729, 51, 4, 12],
+[18742, 51, 4, 25],
+[18839, 51, 7, 31],
+[18890, 51, 9, 20],
+[19027, 52, 2, 4],
+[19042, 52, 2, 19],
+[19128, 52, 5, 15],
+[19296, 52, 10, 30],
+[19416, 53, 2, 27],
+[19566, 53, 7, 27],
+[19624, 53, 9, 23],
+[19685, 53, 11, 23],
+[19821, 54, 4, 8],
+[19909, 54, 7, 5],
+[20006, 54, 10, 10],
+[20139, 55, 2, 20],
+[20336, 55, 9, 5],
+[20505, 56, 2, 21],
+[20655, 56, 7, 20],
+[20841, 57, 1, 22],
+[20906, 57, 3, 28],
+[20931, 57, 4, 22],
+[21114, 57, 10, 22],
+[21158, 57, 12, 5],
+[21246, 58, 3, 3],
+[21414, 58, 8, 18],
+[21528, 58, 12, 10],
+[21550, 59, 1, 1],
+[21582, 59, 2, 2],
+[21731, 59, 7, 1],
+[21903, 59, 12, 20],
+[22062, 60, 5, 27],
+[22128, 60, 8, 1],
+[22212, 60, 10, 24],
+[22411, 61, 5, 11],
+[22568, 61, 10, 15],
+[22591, 61, 11, 7],
+[22647, 62, 1, 2],
+[22710, 62, 3, 6],
+[22840, 62, 7, 14],
+[22850, 62, 7, 24],
+[23046, 63, 2, 5],
+[23231, 63, 8, 9],
+[23248, 63, 8, 26],
+[23273, 63, 9, 20],
+[23347, 63, 12, 3],
+[23444, 64, 3, 9],
+[23482, 64, 4, 16],
+[23682, 64, 11, 2],
+[23806, 65, 3, 6],
+[23957, 65, 8, 4],
+[24061, 65, 11, 16],
+[24120, 66, 1, 14],
+[24160, 66, 2, 23],
+[24298, 66, 7, 11],
+[24450, 66, 12, 10],
+[24567, 67, 4, 6],
+[24704, 67, 8, 21],
+[24773, 67, 10, 29],
+[24973, 68, 5, 16],
+[25079, 68, 8, 30],
+[25170, 68, 11, 29],
+[25350, 69, 5, 28],
+[25496, 69, 10, 21],
+[25611, 70, 2, 13],
+[25732, 70, 6, 14],
+[25782, 70, 8, 3],
+[25937, 71, 1, 5],
+[26109, 71, 6, 26],
+[26300, 72, 1, 3],
+[26319, 72, 1, 22],
+[26325, 72, 1, 28],
+[26448, 72, 5, 30],
+[26627, 72, 11, 25],
+[26752, 73, 3, 30],
+[26775, 73, 4, 22],
+[26836, 73, 6, 22],
+[26870, 73, 7, 26],
+[26967, 73, 10, 31],
+[27013, 73, 12, 16],
+[27028, 73, 12, 31],
+[27164, 74, 5, 16],
+[27350, 74, 11, 18],
+[27412, 75, 1, 19],
+[27503, 75, 4, 20],
+[27692, 75, 10, 26],
+[27700, 75, 11, 3],
+[27875, 76, 4, 26],
+[27937, 76, 6, 27],
+[27984, 76, 8, 13],
+[28058, 76, 10, 26],
+[28217, 77, 4, 3],
+[28264, 77, 5, 20],
+[28436, 77, 11, 8],
+[28620, 78, 5, 11],
+[28741, 78, 9, 9],
+[28878, 79, 1, 24],
+[28916, 79, 3, 3],
+[29032, 79, 6, 27],
+[29084, 79, 8, 18],
+[29096, 79, 8, 30],
+[29136, 79, 10, 9],
+[29143, 79, 10, 16],
+[29218, 79, 12, 30],
+[29335, 80, 4, 25],
+[29361, 80, 5, 21],
+[29388, 80, 6, 17],
+[29468, 80, 9, 5],
+[29482, 80, 9, 19],
+[29665, 81, 3, 21],
+[29666, 81, 3, 22],
+[29672, 81, 3, 28],
+[29759, 81, 6, 23],
+[29861, 81, 10, 3],
+[30050, 82, 4, 10],
+[30101, 82, 5, 31],
+[30293, 82, 12, 9],
+[30338, 83, 1, 23],
+[30513, 83, 7, 17],
+[30667, 83, 12, 18],
+[30859, 84, 6, 27],
+[30925, 84, 9, 1],
+[31078, 85, 2, 1],
+[31101, 85, 2, 24],
+[31134, 85, 3, 29],
+[31284, 85, 8, 26],
+[31400, 85, 12, 20],
+[31470, 86, 2, 28],
+[31628, 86, 8, 5],
+[31803, 87, 1, 27],
+[31850, 87, 3, 15],
+[32031, 87, 9, 12],
+[32093, 87, 11, 13],
+[32293, 88, 5, 31],
+[32411, 88, 9, 26],
+[32565, 89, 2, 27],
+[32710, 89, 7, 22],
+[32889, 90, 1, 17],
+[33066, 90, 7, 13],
+[33243, 91, 1, 6],
+[33370, 91, 5, 13],
+[33462, 91, 8, 13],
+[33619, 92, 1, 17],
+[33730, 92, 5, 7],
+[33874, 92, 9, 28],
+[33925, 92, 11, 18],
+[34037, 93, 3, 10],
+[34229, 93, 9, 18],
+[34411, 94, 3, 19],
+[34448, 94, 4, 25],
+[34602, 94, 9, 26],
+[34747, 95, 2, 18],
+[34914, 95, 8, 4],
+[35003, 95, 11, 1],
+[35106, 96, 2, 12],
+[35159, 96, 4, 5],
+[35182, 96, 4, 28],
+[35382, 96, 11, 14],
+[35387, 96, 11, 19],
+[35555, 97, 5, 6],
+[35636, 97, 7, 26],
+[35710, 97, 10, 8],
+[35876, 98, 3, 23],
+[36055, 98, 9, 18],
+[36182, 99, 1, 23],
+[36336, 99, 6, 26],
+[36510, 99, 12, 17],
+[36523, 99, 12, 30],
+[36646, 100, 5, 2],
+[36803, 100, 10, 6],
+[36881, 100, 12, 23],
+[37053, 101, 6, 13],
+[37159, 101, 9, 27],
+[37316, 102, 3, 3],
+[37388, 102, 5, 14],
+[37545, 102, 10, 18],
+[37624, 103, 1, 5],
+[37666, 103, 2, 16],
+[37705, 103, 3, 27],
+[37809, 103, 7, 9],
+[37836, 103, 8, 5],
+[37868, 103, 9, 6],
+[38031, 104, 2, 16],
+[38175, 104, 7, 9],
+[38269, 104, 10, 11],
+[38361, 105, 1, 11],
+[38483, 105, 5, 13],
+[38642, 105, 10, 19],
+[38714, 105, 12, 30],
+[38795, 106, 3, 21],
+[38893, 106, 6, 27],
+[38983, 106, 9, 25],
+[39116, 107, 2, 5],
+[39262, 107, 7, 1],
+[39336, 107, 9, 13],
+[39456, 108, 1, 11],
+[39521, 108, 3, 16],
+[39529, 108, 3, 24],
+[39719, 108, 9, 30],
+[39888, 109, 3, 18],
+[39988, 109, 6, 26],
+[40092, 109, 10, 8],
+[40152, 109, 12, 7],
+[40244, 110, 3, 9],
+[40410, 110, 8, 22],
+[40480, 110, 10, 31],
+[40508, 110, 11, 28],
+[40514, 110, 12, 4],
+[40662, 111, 5, 1],
+[40850, 111, 11, 5],
+[40854, 111, 11, 9],
+[40951, 112, 2, 14],
+[41039, 112, 5, 12],
+[41166, 112, 9, 16],
+[41269, 112, 12, 28],
+[41427, 113, 6, 4],
+[41575, 113, 10, 30],
+[41633, 113, 12, 27],
+[41641, 114, 1, 4],
+[41682, 114, 2, 14],
+[41694, 114, 2, 26],
+[41774, 114, 5, 17],
+[41890, 114, 9, 10],
+[41893, 114, 9, 13],
+[41933, 114, 10, 23],
+[41963, 114, 11, 22],
+[42012, 115, 1, 10],
+[42013, 115, 1, 11],
+[42117, 115, 4, 25],
+[42164, 115, 6, 11],
+[42331, 115, 11, 25],
+[42481, 116, 4, 23],
+[42521, 116, 6, 2],
+[42525, 116, 6, 6],
+[42557, 116, 7, 8],
+[42604, 116, 8, 24],
+[42612, 116, 9, 1],
+[42744, 117, 1, 11],
+[42898, 117, 6, 14],
+[42914, 117, 6, 30],
+[42961, 117, 8, 16],
+[43134, 118, 2, 5],
+[43222, 118, 5, 4],
+[43346, 118, 9, 5],
+[43386, 118, 10, 15],
+[43495, 119, 2, 1],
+[43683, 119, 8, 8],
+[43812, 119, 12, 15],
+[43950, 120, 5, 1],
+[44027, 120, 7, 17],
+[44156, 120, 11, 23],
+[44242, 121, 2, 17],
+[44379, 121, 7, 4],
+[44392, 121, 7, 17],
+[44588, 122, 1, 29],
+[44763, 122, 7, 23],
+[44828, 122, 9, 26],
+[44837, 122, 10, 5],
+[44905, 122, 12, 12],
+[45032, 123, 4, 18],
+[45066, 123, 5, 22],
+[45114, 123, 7, 9],
+[45243, 123, 11, 15],
+[45268, 123, 12, 10],
+[45286, 123, 12, 28],
+[45436, 124, 5, 26],
+[45571, 124, 10, 8],
+[45572, 124, 10, 9],
+[45713, 125, 2, 27],
+[45771, 125, 4, 26],
+[45919, 125, 9, 21],
+[46098, 126, 3, 19],
+[46221, 126, 7, 20],
+[46403, 127, 1, 18],
+[46492, 127, 4, 17],
+[46599, 127, 8, 2],
+[46604, 127, 8, 7],
+[46613, 127, 8, 16],
+[46672, 127, 10, 14],
+[46773, 128, 1, 23],
+[46825, 128, 3, 15],
+[46848, 128, 4, 7],
+[47037, 128, 10, 13],
+[47075, 128, 11, 20],
+[47225, 129, 4, 19],
+[47235, 129, 4, 29],
+[47401, 129, 10, 12],
+[47567, 130, 3, 27],
+[47735, 130, 9, 11],
+[47768, 130, 10, 14],
+[47873, 131, 1, 27],
+[47977, 131, 5, 11],
+[48124, 131, 10, 5],
+[48274, 132, 3, 3],
+[48351, 132, 5, 19],
+[48373, 132, 6, 10],
+[48474, 132, 9, 19],
+[48497, 132, 10, 12],
+[48619, 133, 2, 11],
+[48631, 133, 2, 23],
+[48745, 133, 6, 17],
+[48793, 133, 8, 4],
+[48935, 133, 12, 24],
+[49011, 134, 3, 10],
+[49058, 134, 4, 26],
+[49108, 134, 6, 15],
+[49174, 134, 8, 20],
+[49242, 134, 10, 27],
+[49278, 134, 12, 2],
+[49366, 135, 2, 28],
+[49435, 135, 5, 8],
+[49606, 135, 10, 26],
+[49786, 136, 4, 23],
+[49931, 136, 9, 15],
+[50044, 137, 1, 6],
+[50127, 137, 3, 30],
+[50258, 137, 8, 8],
+[50315, 137, 10, 4],
+[50438, 138, 2, 4],
+[50572, 138, 6, 18],
+[50630, 138, 8, 15],
+[50633, 138, 8, 18],
+[50799, 139, 1, 31],
+[50981, 139, 8, 1],
+[51027, 139, 9, 16],
+[51084, 139, 11, 12],
+[51200, 140, 3, 7],
+[51229, 140, 4, 5],
+[51389, 140, 9, 12],
+[51464, 140, 11, 26],
+[51654, 141, 6, 4],
+[51801, 141, 10, 29],
+[51925, 142, 3, 2],
+[52074, 142, 7, 29],
+[52191, 142, 11, 23],
+[52284, 143, 2, 24],
+[52431, 143, 7, 21],
+[52504, 143, 10, 2],
+[52587, 143, 12, 24],
+[52661, 144, 3, 7],
+[52728, 144, 5, 13],
+[52771, 144, 6, 25],
+[52914, 144, 11, 15],
+[52978, 145, 1, 18],
+[53001, 145, 2, 10],
+[53085, 145, 5, 5],
+[53244, 145, 10, 11],
+[53296, 145, 12, 2],
+[53330, 146, 1, 5],
+[53372, 146, 2, 16],
+[53385, 146, 3, 1],
+[53401, 146, 3, 17],
+[53502, 146, 6, 26],
+[53516, 146, 7, 10],
+[53574, 146, 9, 6],
+[53654, 146, 11, 25],
+[53702, 147, 1, 12],
+[53846, 147, 6, 5],
+[53892, 147, 7, 21],
+[54067, 148, 1, 12],
+[54105, 148, 2, 19],
+[54304, 148, 9, 5],
+[54374, 148, 11, 14],
+[54559, 149, 5, 18],
+[54586, 149, 6, 14],
+[54745, 149, 11, 20],
+[54843, 150, 2, 26],
+[54932, 150, 5, 26],
+[54937, 150, 5, 31],
+[54972, 150, 7, 5],
+[54981, 150, 7, 14],
+[54991, 150, 7, 24],
+[55008, 150, 8, 10],
+[55063, 150, 10, 4],
+[55095, 150, 11, 5],
+[55279, 151, 5, 8],
+[55308, 151, 6, 6],
+[55312, 151, 6, 10],
+[55406, 151, 9, 12],
+[55441, 151, 10, 17],
+[55491, 151, 12, 6],
+[55590, 152, 3, 14],
+[55756, 152, 8, 27],
+[55776, 152, 9, 16],
+[55834, 152, 11, 13],
+[55935, 153, 2, 22],
+[55986, 153, 4, 14],
+[56105, 153, 8, 11],
+[56139, 153, 9, 14],
+[56315, 154, 3, 9],
+[56343, 154, 4, 6],
+[56406, 154, 6, 8],
+[56550, 154, 10, 30],
+[56706, 155, 4, 4],
+[56906, 155, 10, 21],
+[56964, 155, 12, 18],
+[57118, 156, 5, 20],
+[57256, 156, 10, 5],
+[57419, 157, 3, 17],
+[57474, 157, 5, 11],
+[57608, 157, 9, 22],
+[57719, 158, 1, 11],
+[57725, 158, 1, 17],
+[57814, 158, 4, 16],
+[57878, 158, 6, 19],
+[57881, 158, 6, 22],
+[58019, 158, 11, 7],
+[58026, 158, 11, 14],
+[58084, 159, 1, 11],
+[58105, 159, 2, 1],
+[58135, 159, 3, 3],
+[58292, 159, 8, 7],
+[58483, 160, 2, 14],
+[58557, 160, 4, 28],
+[58639, 160, 7, 19],
+[58665, 160, 8, 14],
+[58812, 161, 1, 8],
+[58822, 161, 1, 18],
+[58961, 161, 6, 6],
+[59055, 161, 9, 8],
+[59235, 162, 3, 7],
+[59304, 162, 5, 15],
+[59372, 162, 7, 22],
+[59407, 162, 8, 26],
+[59488, 162, 11, 15],
+[59627, 163, 4, 3],
+[59690, 163, 6, 5],
+[59870, 163, 12, 2],
+[59876, 163, 12, 8],
+[59954, 164, 2, 24],
+[60106, 164, 7, 25],
+[60153, 164, 9, 10],
+[60179, 164, 10, 6],
+[60315, 165, 2, 19],
+[60353, 165, 3, 29],
+[60517, 165, 9, 9],
+[60615, 165, 12, 16],
+[60668, 166, 2, 7],
+[60729, 166, 4, 9],
+[60760, 166, 5, 10],
+[60766, 166, 5, 16],
+[60876, 166, 9, 3],
+[60948, 166, 11, 14],
+[60987, 166, 12, 23],
+[61185, 167, 7, 9],
+[61341, 167, 12, 12],
+[61521, 168, 6, 9],
+[61576, 168, 8, 3],
+[61714, 168, 12, 19],
+[61836, 169, 4, 20],
+[61890, 169, 6, 13],
+[62060, 169, 11, 30],
+[62153, 170, 3, 3],
+[62239, 170, 5, 28],
+[62305, 170, 8, 2],
+[62352, 170, 9, 18],
+[62444, 170, 12, 19],
+[62625, 171, 6, 18],
+[62628, 171, 6, 21],
+[62782, 171, 11, 22],
+[62793, 171, 12, 3],
+[62808, 171, 12, 18],
+[62888, 172, 3, 7],
+[62901, 172, 3, 20],
+[62948, 172, 5, 6],
+[63060, 172, 8, 26],
+[63242, 173, 2, 24],
+[63425, 173, 8, 26],
+[63587, 174, 2, 4],
+[63733, 174, 6, 30],
+[63752, 174, 7, 19],
+[63927, 175, 1, 10],
+[63970, 175, 2, 22],
+[64083, 175, 6, 15],
+[64176, 175, 9, 16],
+[64214, 175, 10, 24],
+[64361, 176, 3, 19],
+[64497, 176, 8, 2],
+[64528, 176, 9, 2],
+[64721, 177, 3, 14],
+[64783, 177, 5, 15],
+[64914, 177, 9, 23],
+[64926, 177, 10, 5],
+[65059, 178, 2, 15],
+[65107, 178, 4, 4],
+[65209, 178, 7, 15],
+[65377, 178, 12, 30],
+[65489, 179, 4, 21],
+[65532, 179, 6, 3],
+[65596, 179, 8, 6],
+[65784, 180, 2, 10],
+[65917, 180, 6, 22],
+[65995, 180, 9, 8],
+[66102, 180, 12, 24],
+[66228, 181, 4, 29],
+[66232, 181, 5, 3],
+[66296, 181, 7, 6],
+[66429, 181, 11, 16],
+[66529, 182, 2, 24],
+[66708, 182, 8, 22],
+[66846, 183, 1, 7],
+[66911, 183, 3, 13],
+[66977, 183, 5, 18],
+[67144, 183, 11, 1],
+[67165, 183, 11, 22],
+[67289, 184, 3, 25],
+[67305, 184, 4, 10],
+[67425, 184, 8, 8],
+[67517, 184, 11, 8],
+[67706, 185, 5, 16],
+[67715, 185, 5, 25],
+[67885, 185, 11, 11],
+[68064, 186, 5, 9],
+[68194, 186, 9, 16],
+[68385, 187, 3, 26],
+[68545, 187, 9, 2],
+[68680, 188, 1, 15],
+[68687, 188, 1, 22],
+[68852, 188, 7, 5],
+[68943, 188, 10, 4],
+[68948, 188, 10, 9],
+[69103, 189, 3, 13],
+[69160, 189, 5, 9],
+[69167, 189, 5, 16],
+[69236, 189, 7, 24],
+[69254, 189, 8, 11],
+[69400, 190, 1, 4],
+[69489, 190, 4, 3],
+[69573, 190, 6, 26],
+[69726, 190, 11, 26],
+[69803, 191, 2, 11],
+[69806, 191, 2, 14],
+[69830, 191, 3, 10],
+[70029, 191, 9, 25],
+[70211, 192, 3, 25],
+[70404, 192, 10, 4],
+[70529, 193, 2, 6],
+[70715, 193, 8, 11],
+[70774, 193, 10, 9],
+[70883, 194, 1, 26],
+[71004, 194, 5, 27],
+[71022, 194, 6, 14],
+[71067, 194, 7, 29],
+[71172, 194, 11, 11],
+[71286, 195, 3, 5],
+[71466, 195, 9, 1],
+[71575, 195, 12, 19],
+[71616, 196, 1, 29],
+[71618, 196, 1, 31],
+[71812, 196, 8, 12],
+[71836, 196, 9, 5],
+[72026, 197, 3, 14],
+[72157, 197, 7, 23],
+[72163, 197, 7, 29],
+[72351, 198, 2, 2],
+[72466, 198, 5, 28],
+[72549, 198, 8, 19],
+[72578, 198, 9, 17],
+[72620, 198, 10, 29],
+[72745, 199, 3, 3],
+[72859, 199, 6, 25],
+[72964, 199, 10, 8],
+[73117, 200, 3, 10],
+[73247, 200, 7, 18],
+[73252, 200, 7, 23],
+[73418, 201, 1, 5],
+[73440, 201, 1, 27],
+[73460, 201, 2, 16],
+[73543, 201, 5, 10],
+[73599, 201, 7, 5],
+[73759, 201, 12, 12],
+[73783, 202, 1, 5],
+[73959, 202, 6, 30],
+[74041, 202, 9, 20],
+[74079, 202, 10, 28],
+[74095, 202, 11, 13],
+[74277, 203, 5, 14],
+[74459, 203, 11, 12],
+[74476, 203, 11, 29],
+[74559, 204, 2, 20],
+[74650, 204, 5, 21],
+[74815, 204, 11, 2],
+[74829, 204, 11, 16],
+[74922, 205, 2, 17],
+[75008, 205, 5, 14],
+[75164, 205, 10, 17],
+[75352, 206, 4, 23],
+[75484, 206, 9, 2],
+[75592, 206, 12, 19],
+[75605, 207, 1, 1],
+[75771, 207, 6, 16],
+[75843, 207, 8, 27],
+[75857, 207, 9, 10],
+[75905, 207, 10, 28],
+[75950, 207, 12, 12],
+[76098, 208, 5, 8],
+[76206, 208, 8, 24],
+[76383, 209, 2, 17],
+[76523, 209, 7, 7],
+[76680, 209, 12, 11],
+[76728, 210, 1, 28],
+[76925, 210, 8, 13],
+[77075, 211, 1, 10],
+[77263, 211, 7, 17],
+[77453, 212, 1, 23],
+[77460, 212, 1, 30],
+[77486, 212, 2, 25],
+[77487, 212, 2, 26],
+[77544, 212, 4, 23],
+[77587, 212, 6, 5],
+[77711, 212, 10, 7],
+[77730, 212, 10, 26],
+[77771, 212, 12, 6],
+[77875, 213, 3, 20],
+[77885, 213, 3, 30],
+[77947, 213, 5, 31],
+[78112, 213, 11, 12],
+[78192, 214, 1, 31],
+[78288, 214, 5, 7],
+[78382, 214, 8, 9],
+[78522, 214, 12, 27],
+[78604, 215, 3, 19],
+[78778, 215, 9, 9],
+[78787, 215, 9, 18],
+[78972, 216, 3, 21],
+[78975, 216, 3, 24],
+[79175, 216, 10, 10],
+[79249, 216, 12, 23],
+[79306, 217, 2, 18],
+[79489, 217, 8, 20],
+[79676, 218, 2, 23],
+[79762, 218, 5, 20],
+[79857, 218, 8, 23],
+[79961, 218, 12, 5],
+[80134, 219, 5, 27],
+[80236, 219, 9, 6],
+[80321, 219, 11, 30],
+[80472, 220, 4, 29],
+[80541, 220, 7, 7],
+[80657, 220, 10, 31],
+[80830, 221, 4, 22],
+[80985, 221, 9, 24],
+[81176, 222, 4, 3],
+[81360, 222, 10, 4],
+[81378, 222, 10, 22],
+[81409, 222, 11, 22],
+[81593, 223, 5, 25],
+[81786, 223, 12, 4],
+[81965, 224, 5, 31],
+[81979, 224, 6, 14],
+[81999, 224, 7, 4],
+[82070, 224, 9, 13],
+[82130, 224, 11, 12],
+[82276, 225, 4, 7],
+[82413, 225, 8, 22],
+[82578, 226, 2, 3],
+[82722, 226, 6, 27],
+[82730, 226, 7, 5],
+[82734, 226, 7, 9],
+[82844, 226, 10, 27],
+[82955, 227, 2, 15],
+[83127, 227, 8, 6],
+[83254, 227, 12, 11],
+[83351, 228, 3, 17],
+[83503, 228, 8, 16],
+[83667, 229, 1, 27],
+[83822, 229, 7, 1],
+[83927, 229, 10, 14],
+[84028, 230, 1, 23],
+[84114, 230, 4, 19],
+[84149, 230, 5, 24],
+[84226, 230, 8, 9],
+[84354, 230, 12, 15],
+[84489, 231, 4, 29],
+[84507, 231, 5, 17],
+[84684, 231, 11, 10],
+[84763, 232, 1, 28],
+[84845, 232, 4, 19],
+[85006, 232, 9, 27],
+[85018, 232, 10, 9],
+[85155, 233, 2, 23],
+[85290, 233, 7, 8],
+[85486, 234, 1, 20],
+[85528, 234, 3, 3],
+[85670, 234, 7, 23],
+[85710, 234, 9, 1],
+[85782, 234, 11, 12],
+[85830, 234, 12, 30],
+[85992, 235, 6, 10],
+[86076, 235, 9, 2],
+[86099, 235, 9, 25],
+[86281, 236, 3, 25],
+[86316, 236, 4, 29],
+[86456, 236, 9, 16],
+[86500, 236, 10, 30],
+[86629, 237, 3, 8],
+[86711, 237, 5, 29],
+[86818, 237, 9, 13],
+[86915, 237, 12, 19],
+[86977, 238, 2, 19],
+[87128, 238, 7, 20],
+[87270, 238, 12, 9],
+[87469, 239, 6, 26],
+[87557, 239, 9, 22],
+[87642, 239, 12, 16],
+[87783, 240, 5, 5],
+[87970, 240, 11, 8],
+[88020, 240, 12, 28],
+[88024, 241, 1, 1],
+[88128, 241, 4, 15],
+[88207, 241, 7, 3],
+[88336, 241, 11, 9],
+[88345, 241, 11, 18],
+[88378, 241, 12, 21],
+[88519, 242, 5, 11],
+[88556, 242, 6, 17],
+[88730, 242, 12, 8],
+[88880, 243, 5, 7],
+[89041, 243, 10, 15],
+[89059, 243, 11, 2],
+[89167, 244, 2, 18],
+[89245, 244, 5, 6],
+[89417, 244, 10, 25],
+[89614, 245, 5, 10],
+[89628, 245, 5, 24],
+[89696, 245, 7, 31],
+[89735, 245, 9, 8],
+[89793, 245, 11, 5],
+[89858, 246, 1, 9],
+[90055, 246, 7, 25],
+[90210, 246, 12, 27],
+[90246, 247, 2, 1],
+[90301, 247, 3, 28],
+[90379, 247, 6, 14],
+[90464, 247, 9, 7],
+[90653, 248, 3, 14],
+[90792, 248, 7, 31],
+[90886, 248, 11, 2],
+[90930, 248, 12, 16],
+[91126, 249, 6, 30],
+[91260, 249, 11, 11],
+[91340, 250, 1, 30],
+[91392, 250, 3, 23],
+[91507, 250, 7, 16],
+[91661, 250, 12, 17],
+[91680, 251, 1, 5],
+[91722, 251, 2, 16],
+[91893, 251, 8, 6],
+[92022, 251, 12, 13],
+[92078, 252, 2, 7],
+[92277, 252, 8, 24],
+[92404, 252, 12, 29],
+[92448, 253, 2, 11],
+[92621, 253, 8, 3],
+[92696, 253, 10, 17],
+[92889, 254, 4, 28],
+[93011, 254, 8, 28],
+[93165, 255, 1, 29],
+[93364, 255, 8, 16],
+[93451, 255, 11, 11],
+[93651, 256, 5, 29],
+[93749, 256, 9, 4],
+[93817, 256, 11, 11],
+[93980, 257, 4, 23],
+[94112, 257, 9, 2],
+[94146, 257, 10, 6],
+[94172, 257, 11, 1],
+[94178, 257, 11, 7],
+[94255, 258, 1, 23],
+[94437, 258, 7, 24],
+[94501, 258, 9, 26],
+[94528, 258, 10, 23],
+[94661, 259, 3, 5],
+[94725, 259, 5, 8],
+[94802, 259, 7, 24],
+[94990, 260, 1, 28],
+[95114, 260, 5, 31],
+[95164, 260, 7, 20],
+[95211, 260, 9, 5],
+[95333, 261, 1, 5],
+[95409, 261, 3, 22],
+[95572, 261, 9, 1],
+[95669, 261, 12, 7],
+[95757, 262, 3, 5],
+[95768, 262, 3, 16],
+[95938, 262, 9, 2],
+[96123, 263, 3, 6],
+[96141, 263, 3, 24],
+[96285, 263, 8, 15],
+[96383, 263, 11, 21],
+[96486, 264, 3, 3],
+[96544, 264, 4, 30],
+[96727, 264, 10, 30],
+[96805, 265, 1, 16],
+[96870, 265, 3, 22],
+[96950, 265, 6, 10],
+[96989, 265, 7, 19],
+[97108, 265, 11, 15],
+[97167, 266, 1, 13],
+[97246, 266, 4, 2],
+[97281, 266, 5, 7],
+[97391, 266, 8, 25],
+[97415, 266, 9, 18],
+[97508, 266, 12, 20],
+[97670, 267, 5, 31],
+[97835, 267, 11, 12],
+[98002, 268, 4, 27],
+[98083, 268, 7, 17],
+[98180, 268, 10, 22],
+[98364, 269, 4, 24],
+[98394, 269, 5, 24],
+[98418, 269, 6, 17],
+[98549, 269, 10, 26],
+[98697, 270, 3, 23],
+[98720, 270, 4, 15],
+[98912, 270, 10, 24],
+[99005, 271, 1, 25],
+[99074, 271, 4, 4],
+[99150, 271, 6, 19],
+[99346, 272, 1, 1],
+[99513, 272, 6, 16],
+[99569, 272, 8, 11],
+[99672, 272, 11, 22],
+[99844, 273, 5, 13],
+[99891, 273, 6, 29],
+[99982, 273, 9, 28],
+[100180, 274, 4, 14],
+[100331, 274, 9, 12],
+[100477, 275, 2, 5],
+[100627, 275, 7, 5],
+[100659, 275, 8, 6],
+[100741, 275, 10, 27],
+[100847, 276, 2, 10],
+[101009, 276, 7, 21],
+[101066, 276, 9, 16],
+[101123, 276, 11, 12],
+[101252, 277, 3, 21],
+[101375, 277, 7, 22],
+[101443, 277, 9, 28],
+[101504, 277, 11, 28],
+[101680, 278, 5, 23],
+[101746, 278, 7, 28],
+[101849, 278, 11, 8],
+[101969, 279, 3, 8],
+[102076, 279, 6, 23],
+[102157, 279, 9, 12],
+[102206, 279, 10, 31],
+[102291, 280, 1, 24],
+[102432, 280, 6, 13],
+[102502, 280, 8, 22],
+[102608, 280, 12, 6],
+[102617, 280, 12, 15],
+[102808, 281, 6, 24],
+[102839, 281, 7, 25],
+[102884, 281, 9, 8],
+[102988, 281, 12, 21],
+[103109, 282, 4, 21],
+[103276, 282, 10, 5],
+[103470, 283, 4, 17],
+[103595, 283, 8, 20],
+[103739, 284, 1, 11],
+[103795, 284, 3, 7],
+[103935, 284, 7, 25],
+[104118, 285, 1, 24],
+[104198, 285, 4, 14],
+[104280, 285, 7, 5],
+[104454, 285, 12, 26],
+[104532, 286, 3, 14],
+[104679, 286, 8, 8],
+[104716, 286, 9, 14],
+[104718, 286, 9, 16],
+[104876, 287, 2, 21],
+[104934, 287, 4, 20],
+[105117, 287, 10, 20],
+[105315, 288, 5, 5],
+[105405, 288, 8, 3],
+[105602, 289, 2, 16],
+[105692, 289, 5, 17],
+[105877, 289, 11, 18],
+[106025, 290, 4, 15],
+[106159, 290, 8, 27],
+[106305, 291, 1, 20],
+[106455, 291, 6, 19],
+[106536, 291, 9, 8],
+[106716, 292, 3, 6],
+[106816, 292, 6, 14],
+[106820, 292, 6, 18],
+[106834, 292, 7, 2],
+[106918, 292, 9, 24],
+[107071, 293, 2, 24],
+[107141, 293, 5, 5],
+[107187, 293, 6, 20],
+[107242, 293, 8, 14],
+[107299, 293, 10, 10],
+[107499, 294, 4, 28],
+[107640, 294, 9, 16],
+[107833, 295, 3, 28],
+[107874, 295, 5, 8],
+[107980, 295, 8, 22],
+[108078, 295, 11, 28],
+[108102, 295, 12, 22],
+[108188, 296, 3, 17],
+[108193, 296, 3, 22],
+[108232, 296, 4, 30],
+[108292, 296, 6, 29],
+[108308, 296, 7, 15],
+[108423, 296, 11, 7],
+[108509, 297, 2, 1],
+[108658, 297, 6, 30],
+[108837, 297, 12, 26],
+[108863, 298, 1, 21],
+[108978, 298, 5, 16],
+[109095, 298, 9, 10],
+[109286, 299, 3, 20],
+[109461, 299, 9, 11],
+[109488, 299, 10, 8],
+[109511, 299, 10, 31],
+[109598, 300, 1, 26],
+[109599, 300, 1, 27],
+[109763, 300, 7, 10],
+[109852, 300, 10, 7],
+[109896, 300, 11, 20],
+[109944, 301, 1, 7],
+[110047, 301, 4, 20],
+[110174, 301, 8, 25],
+[110308, 302, 1, 6],
+[110340, 302, 2, 7],
+[110486, 302, 7, 3],
+[110606, 302, 10, 31],
+[110667, 302, 12, 31],
+[110809, 303, 5, 22],
+[110811, 303, 5, 24],
+[110929, 303, 9, 19],
+[111107, 304, 3, 15],
+[111259, 304, 8, 14],
+[111298, 304, 9, 22],
+[111469, 305, 3, 12],
+[111610, 305, 7, 31],
+[111720, 305, 11, 18],
+[111751, 305, 12, 19],
+[111804, 306, 2, 10],
+[111822, 306, 2, 28],
+[111953, 306, 7, 9],
+[112135, 307, 1, 7],
+[112285, 307, 6, 6],
+[112296, 307, 6, 17],
+[112457, 307, 11, 25],
+[112493, 307, 12, 31],
+[112665, 308, 6, 20],
+[112686, 308, 7, 11],
+[112783, 308, 10, 16],
+[112967, 309, 4, 18],
+[113063, 309, 7, 23],
+[113158, 309, 10, 26],
+[113344, 310, 4, 30],
+[113374, 310, 5, 30],
+[113457, 310, 8, 21],
+[113612, 311, 1, 23],
+[113667, 311, 3, 19],
+[113840, 311, 9, 8],
+[113902, 311, 11, 9],
+[114074, 312, 4, 29],
+[114190, 312, 8, 23],
+[114261, 312, 11, 2],
+[114386, 313, 3, 7],
+[114467, 313, 5, 27],
+[114581, 313, 9, 18],
+[114663, 313, 12, 9],
+[114790, 314, 4, 15],
+[114894, 314, 7, 28],
+[114986, 314, 10, 28],
+[115062, 315, 1, 12],
+[115082, 315, 2, 1],
+[115083, 315, 2, 2],
+[115113, 315, 3, 4],
+[115268, 315, 8, 6],
+[115390, 315, 12, 6],
+[115484, 316, 3, 9],
+[115635, 316, 8, 7],
+[115791, 317, 1, 10],
+[115848, 317, 3, 8],
+[116035, 317, 9, 11],
+[116122, 317, 12, 7],
+[116230, 318, 3, 25],
+[116362, 318, 8, 4],
+[116416, 318, 9, 27],
+[116435, 318, 10, 16],
+[116626, 319, 4, 25],
+[116761, 319, 9, 7],
+[116900, 320, 1, 24],
+[117069, 320, 7, 11],
+[117088, 320, 7, 30],
+[117206, 320, 11, 25],
+[117365, 321, 5, 3],
+[117514, 321, 9, 29],
+[117520, 321, 10, 5],
+[117692, 322, 3, 26],
+[117886, 322, 10, 6],
+[117968, 322, 12, 27],
+[118103, 323, 5, 11],
+[118268, 323, 10, 23],
+[118333, 323, 12, 27],
+[118339, 324, 1, 2],
+[118448, 324, 4, 20],
+[118573, 324, 8, 23],
+[118591, 324, 9, 10],
+[118724, 325, 1, 21],
+[118894, 325, 7, 10],
+[118946, 325, 8, 31],
+[119091, 326, 1, 23],
+[119273, 326, 7, 24],
+[119380, 326, 11, 8],
+[119579, 327, 5, 26],
+[119602, 327, 6, 18],
+[119640, 327, 7, 26],
+[119702, 327, 9, 26],
+[119790, 327, 12, 23],
+[119926, 328, 5, 7],
+[120054, 328, 9, 12],
+[120239, 329, 3, 16],
+[120436, 329, 9, 29],
+[120598, 330, 3, 10],
+[120679, 330, 5, 30],
+[120824, 330, 10, 22],
+[120961, 331, 3, 8],
+[121143, 331, 9, 6],
+[121162, 331, 9, 25],
+[121216, 331, 11, 18],
+[121230, 331, 12, 2],
+[121419, 332, 6, 8],
+[121608, 332, 12, 14],
+[121639, 333, 1, 14],
+[121664, 333, 2, 8],
+[121679, 333, 2, 23],
+[121709, 333, 3, 25],
+[121783, 333, 6, 7],
+[121823, 333, 7, 17],
+[121858, 333, 8, 21],
+[121939, 333, 11, 10],
+[121991, 334, 1, 1],
+[122133, 334, 5, 23],
+[122200, 334, 7, 29],
+[122345, 334, 12, 21],
+[122507, 335, 6, 1],
+[122539, 335, 7, 3],
+[122684, 335, 11, 25],
+[122730, 336, 1, 10],
+[122813, 336, 4, 2],
+[122862, 336, 5, 21],
+[123009, 336, 10, 15],
+[123097, 337, 1, 11],
+[123293, 337, 7, 26],
+[123323, 337, 8, 25],
+[123330, 337, 9, 1],
+[123500, 338, 2, 18],
+[123535, 338, 3, 25],
+[123696, 338, 9, 2],
+[123713, 338, 9, 19],
+[123852, 339, 2, 5],
+[123930, 339, 4, 24],
+[123985, 339, 6, 18],
+[123994, 339, 6, 27],
+[124090, 339, 10, 1],
+[124237, 340, 2, 25],
+[124427, 340, 9, 2],
+[124613, 341, 3, 7],
+[124644, 341, 4, 7],
+[124671, 341, 5, 4],
+[124766, 341, 8, 7],
+[124837, 341, 10, 17],
+[124969, 342, 2, 26],
+[125075, 342, 6, 12],
+[125217, 342, 11, 1],
+[125385, 343, 4, 18],
+[125477, 343, 7, 19],
+[125663, 344, 1, 21],
+[125854, 344, 7, 30],
+[125987, 344, 12, 10],
+[126079, 345, 3, 12],
+[126241, 345, 8, 21],
+[126386, 346, 1, 13],
+[126528, 346, 6, 4],
+[126701, 346, 11, 24],
+[126878, 347, 5, 20],
+[126990, 347, 9, 9],
+[127151, 348, 2, 17],
+[127292, 348, 7, 7],
+[127376, 348, 9, 29],
+[127451, 348, 12, 13],
+[127507, 349, 2, 7],
+[127661, 349, 7, 11],
+[127737, 349, 9, 25],
+[127787, 349, 11, 14],
+[127874, 350, 2, 9],
+[128042, 350, 7, 27],
+[128140, 350, 11, 2],
+[128327, 351, 5, 8],
+[128362, 351, 6, 12],
+[128537, 351, 12, 4],
+[128613, 352, 2, 18],
+[128623, 352, 2, 28],
+[128694, 352, 5, 9],
+[128799, 352, 8, 22],
+[128895, 352, 11, 26],
+[129061, 353, 5, 11],
+[129067, 353, 5, 17],
+[129208, 353, 10, 5],
+[129403, 354, 4, 18],
+[129524, 354, 8, 17],
+[129719, 355, 2, 28],
+[129809, 355, 5, 29],
+[129849, 355, 7, 8],
+[129985, 355, 11, 21],
+[130177, 356, 5, 31],
+[130363, 356, 12, 3],
+[130558, 357, 6, 16],
+[130666, 357, 10, 2],
+[130782, 358, 1, 26],
+[130833, 358, 3, 18],
+[130861, 358, 4, 15],
+[131027, 358, 9, 28],
+[131159, 359, 2, 7],
+[131340, 359, 8, 7],
+[131380, 359, 9, 16],
+[131548, 360, 3, 2],
+[131655, 360, 6, 17],
+[131776, 360, 10, 16],
+[131825, 360, 12, 4],
+[131883, 361, 1, 31],
+[132061, 361, 7, 28],
+[132186, 361, 11, 30],
+[132201, 361, 12, 15],
+[132295, 362, 3, 19],
+[132337, 362, 4, 30],
+[132481, 362, 9, 21],
+[132504, 362, 10, 14],
+[132639, 363, 2, 26],
+[132747, 363, 6, 14],
+[132784, 363, 7, 21],
+[132933, 363, 12, 17],
+[132962, 364, 1, 15],
+[133090, 364, 5, 22],
+[133119, 364, 6, 20],
+[133197, 364, 9, 6],
+[133292, 364, 12, 10],
+[133409, 365, 4, 6],
+[133453, 365, 5, 20],
+[133571, 365, 9, 15],
+[133679, 366, 1, 1],
+[133720, 366, 2, 11],
+[133914, 366, 8, 24],
+[133964, 366, 10, 13],
+[134091, 367, 2, 17],
+[134286, 367, 8, 31],
+[134424, 368, 1, 16],
+[134527, 368, 4, 28],
+[134553, 368, 5, 24],
+[134709, 368, 10, 27],
+[134798, 369, 1, 24],
+[134885, 369, 4, 21],
+[134904, 369, 5, 10],
+[134927, 369, 6, 2],
+[134994, 369, 8, 8],
+[135098, 369, 11, 20],
+[135172, 370, 2, 2],
+[135220, 370, 3, 22],
+[135353, 370, 8, 2],
+[135467, 370, 11, 24],
+[135665, 371, 6, 10],
+[135811, 371, 11, 3],
+[135934, 372, 3, 5],
+[136045, 372, 6, 24],
+[136061, 372, 7, 10],
+[136106, 372, 8, 24],
+[136163, 372, 10, 20],
+[136202, 372, 11, 28],
+[136297, 373, 3, 3],
+[136317, 373, 3, 23],
+[136509, 373, 10, 1],
+[136552, 373, 11, 13],
+[136671, 374, 3, 12],
+[136809, 374, 7, 28],
+[137003, 375, 2, 7],
+[137163, 375, 7, 17],
+[137259, 375, 10, 21],
+[137345, 376, 1, 15],
+[137418, 376, 3, 28],
+[137484, 376, 6, 2],
+[137627, 376, 10, 23],
+[137664, 376, 11, 29],
+[137795, 377, 4, 9],
+[137834, 377, 5, 18],
+[137906, 377, 7, 29],
+[137983, 377, 10, 14],
+[138110, 378, 2, 18],
+[138265, 378, 7, 23],
+[138332, 378, 9, 28],
+[138377, 378, 11, 12],
+[138382, 378, 11, 17],
+[138580, 379, 6, 3],
+[138774, 379, 12, 14],
+[138938, 380, 5, 26],
+[138947, 380, 6, 4],
+[138997, 380, 7, 24],
+[139176, 381, 1, 19],
+[139234, 381, 3, 18],
+[139321, 381, 6, 13],
+[139521, 381, 12, 30],
+[139708, 382, 7, 5],
+[139828, 382, 11, 2],
+[139908, 383, 1, 21],
+[139960, 383, 3, 14],
+[139997, 383, 4, 20],
+[140028, 383, 5, 21],
+[140046, 383, 6, 8],
+[140233, 383, 12, 12],
+[140257, 384, 1, 5],
+[140282, 384, 1, 30],
+[140463, 384, 7, 29],
+[140464, 384, 7, 30],
+[140604, 384, 12, 17],
+[140738, 385, 4, 30],
+[140773, 385, 6, 4],
+[140835, 385, 8, 5],
+[140850, 385, 8, 20],
+[141042, 386, 2, 28],
+[141183, 386, 7, 19],
+[141260, 386, 10, 4],
+[141324, 386, 12, 7],
+[141333, 386, 12, 16],
+[141448, 387, 4, 10],
+[141639, 387, 10, 18],
+[141767, 388, 2, 23],
+[141781, 388, 3, 8],
+[141826, 388, 4, 22],
+[141951, 388, 8, 25],
+[142005, 388, 10, 18],
+[142068, 388, 12, 20],
+[142186, 389, 4, 17],
+[142195, 389, 4, 26],
+[142380, 389, 10, 28],
+[142479, 390, 2, 4],
+[142484, 390, 2, 9],
+[142660, 390, 8, 4],
+[142838, 391, 1, 29],
+[142926, 391, 4, 27],
+[142994, 391, 7, 4],
+[142996, 391, 7, 6],
+[143058, 391, 9, 6],
+[143123, 391, 11, 10],
+[143152, 391, 12, 9],
+[143320, 392, 5, 25],
+[143507, 392, 11, 28],
+[143547, 393, 1, 7],
+[143726, 393, 7, 5],
+[143744, 393, 7, 23],
+[143817, 393, 10, 4],
+[143921, 394, 1, 16],
+[144046, 394, 5, 21],
+[144077, 394, 6, 21],
+[144166, 394, 9, 18],
+[144190, 394, 10, 12],
+[144245, 394, 12, 6],
+[144309, 395, 2, 8],
+[144488, 395, 8, 6],
+[144610, 395, 12, 6],
+[144630, 395, 12, 26],
+[144690, 396, 2, 24],
+[144820, 396, 7, 3],
+[144871, 396, 8, 23],
+[144961, 396, 11, 21],
+[144995, 396, 12, 25],
+[145093, 397, 4, 2],
+[145165, 397, 6, 13],
+[145286, 397, 10, 12],
+[145393, 398, 1, 27],
+[145540, 398, 6, 23],
+[145700, 398, 11, 30],
+[145795, 399, 3, 5],
+[145808, 399, 3, 18],
+[145913, 399, 7, 1],
+[145983, 399, 9, 9],
+[146105, 400, 1, 9],
+[146118, 400, 1, 22],
+[146307, 400, 7, 29],
+[146418, 400, 11, 17],
+[146453, 400, 12, 22],
+[146628, 401, 6, 15],
+[146824, 401, 12, 28],
+[146907, 402, 3, 21],
+[147037, 402, 7, 29],
+[147106, 402, 10, 6],
+[147130, 402, 10, 30],
+[147199, 403, 1, 7],
+[147209, 403, 1, 17],
+[147404, 403, 7, 31],
+[147585, 404, 1, 28],
+[147697, 404, 5, 19],
+[147812, 404, 9, 11],
+[147817, 404, 9, 16],
+[147962, 405, 2, 8],
+[148019, 405, 4, 6],
+[148136, 405, 8, 1],
+[148159, 405, 8, 24],
+[148297, 406, 1, 9],
+[148371, 406, 3, 24],
+[148447, 406, 6, 8],
+[148580, 406, 10, 19],
+[148747, 407, 4, 4],
+[148938, 407, 10, 12],
+[149061, 408, 2, 12],
+[149227, 408, 7, 27],
+[149371, 408, 12, 18],
+[149452, 409, 3, 9],
+[149521, 409, 5, 17],
+[149621, 409, 8, 25],
+[149686, 409, 10, 29],
+[149749, 409, 12, 31],
+[149823, 410, 3, 15],
+[149877, 410, 5, 8],
+[149944, 410, 7, 14],
+[150134, 411, 1, 20],
+[150318, 411, 7, 23],
+[150380, 411, 9, 23],
+[150525, 412, 2, 15],
+[150716, 412, 8, 24],
+[150741, 412, 9, 18],
+[150819, 412, 12, 5],
+[150884, 413, 2, 8],
+[151017, 413, 6, 21],
+[151030, 413, 7, 4],
+[151183, 413, 12, 4],
+[151280, 414, 3, 11],
+[151374, 414, 6, 13],
+[151393, 414, 7, 2],
+[151506, 414, 10, 23],
+[151601, 415, 1, 26],
+[151746, 415, 6, 20],
+[151767, 415, 7, 11],
+[151853, 415, 10, 5],
+[151958, 416, 1, 18],
+[152090, 416, 5, 29],
+[152149, 416, 7, 27],
+[152219, 416, 10, 5],
+[152370, 417, 3, 5],
+[152555, 417, 9, 6],
+[152670, 417, 12, 30],
+[152695, 418, 1, 24],
+[152760, 418, 3, 30],
+[152802, 418, 5, 11],
+[153002, 418, 11, 27],
+[153200, 419, 6, 13],
+[153380, 419, 12, 10],
+[153567, 420, 6, 14],
+[153680, 420, 10, 5],
+[153782, 421, 1, 15],
+[153814, 421, 2, 16],
+[153924, 421, 6, 6],
+[153964, 421, 7, 16],
+[154100, 421, 11, 29],
+[154109, 421, 12, 8],
+[154258, 422, 5, 6],
+[154307, 422, 6, 24],
+[154382, 422, 9, 7],
+[154412, 422, 10, 7],
+[154612, 423, 4, 25],
+[154737, 423, 8, 28],
+[154904, 424, 2, 11],
+[155052, 424, 7, 8],
+[155106, 424, 8, 31],
+[155255, 425, 1, 27],
+[155362, 425, 5, 14],
+[155471, 425, 8, 31],
+[155506, 425, 10, 5],
+[155535, 425, 11, 3],
+[155717, 426, 5, 4],
+[155746, 426, 6, 2],
+[155776, 426, 7, 2],
+[155961, 427, 1, 3],
+[155986, 427, 1, 28],
+[155996, 427, 2, 7],
+[156146, 427, 7, 7],
+[156280, 427, 11, 18],
+[156446, 428, 5, 2],
+[156535, 428, 7, 30],
+[156669, 428, 12, 11],
+[156734, 429, 2, 14],
+[156747, 429, 2, 27],
+[156857, 429, 6, 17],
+[157014, 429, 11, 21],
+[157169, 430, 4, 25],
+[157183, 430, 5, 9],
+[157380, 430, 11, 22],
+[157531, 431, 4, 22],
+[157680, 431, 9, 18],
+[157805, 432, 1, 21],
+[157894, 432, 4, 19],
+[157916, 432, 5, 11],
+[158081, 432, 10, 23],
+[158137, 432, 12, 18],
+[158263, 433, 4, 23],
+[158385, 433, 8, 23],
+[158443, 433, 10, 20],
+[158606, 434, 4, 1],
+[158739, 434, 8, 12],
+[158892, 435, 1, 12],
+[159012, 435, 5, 12],
+[159142, 435, 9, 19],
+[159274, 436, 1, 29],
+[159384, 436, 5, 18],
+[159385, 436, 5, 19],
+[159583, 436, 12, 3],
+[159697, 437, 3, 27],
+[159844, 437, 8, 21],
+[160005, 438, 1, 29],
+[160083, 438, 4, 17],
+[160096, 438, 4, 30],
+[160221, 438, 9, 2],
+[160362, 439, 1, 21],
+[160488, 439, 5, 27],
+[160506, 439, 6, 14],
+[160589, 439, 9, 5],
+[160774, 440, 3, 8],
+[160812, 440, 4, 15],
+[160931, 440, 8, 12],
+[161086, 441, 1, 14],
+[161277, 441, 7, 24],
+[161334, 441, 9, 19],
+[161493, 442, 2, 25],
+[161574, 442, 5, 17],
+[161701, 442, 9, 21],
+[161836, 443, 2, 3],
+[162014, 443, 7, 31],
+[162031, 443, 8, 17],
+[162205, 444, 2, 7],
+[162370, 444, 7, 21],
+[162375, 444, 7, 26],
+[162432, 444, 9, 21],
+[162513, 444, 12, 11],
+[162552, 445, 1, 19],
+[162579, 445, 2, 15],
+[162633, 445, 4, 10],
+[162636, 445, 4, 13],
+[162688, 445, 6, 4],
+[162874, 445, 12, 7],
+[162909, 446, 1, 11],
+[162967, 446, 3, 10],
+[162999, 446, 4, 11],
+[163056, 446, 6, 7],
+[163253, 446, 12, 21],
+[163392, 447, 5, 9],
+[163490, 447, 8, 15],
+[163614, 447, 12, 17],
+[163782, 448, 6, 2],
+[163956, 448, 11, 23],
+[164091, 449, 4, 7],
+[164272, 449, 10, 5],
+[164426, 450, 3, 8],
+[164472, 450, 4, 23],
+[164488, 450, 5, 9],
+[164536, 450, 6, 26],
+[164723, 450, 12, 30],
+[164863, 451, 5, 19],
+[164915, 451, 7, 10],
+[164920, 451, 7, 15],
+[164937, 451, 8, 1],
+[165090, 452, 1, 1],
+[165113, 452, 1, 24],
+[165123, 452, 2, 3],
+[165130, 452, 2, 10],
+[165157, 452, 3, 8],
+[165194, 452, 4, 14],
+[165273, 452, 7, 2],
+[165440, 452, 12, 16],
+[165634, 453, 6, 28],
+[165790, 453, 12, 1],
+[165828, 454, 1, 8],
+[166008, 454, 7, 7],
+[166175, 454, 12, 21],
+[166320, 455, 5, 15],
+[166455, 455, 9, 27],
+[166640, 456, 3, 30],
+[166801, 456, 9, 7],
+[166877, 456, 11, 22],
+[167018, 457, 4, 12],
+[167191, 457, 10, 2],
+[167369, 458, 3, 29],
+[167473, 458, 7, 11],
+[167558, 458, 10, 4],
+[167684, 459, 2, 7],
+[167741, 459, 4, 5],
+[167854, 459, 7, 27],
+[167906, 459, 9, 17],
+[168002, 459, 12, 22],
+[168149, 460, 5, 17],
+[168267, 460, 9, 12],
+[168433, 461, 2, 25],
+[168469, 461, 4, 2],
+[168658, 461, 10, 8],
+[168805, 462, 3, 4],
+[168939, 462, 7, 16],
+[169003, 462, 9, 18],
+[169104, 462, 12, 28],
+[169164, 463, 2, 26],
+[169196, 463, 3, 30],
+[169229, 463, 5, 2],
+[169341, 463, 8, 22],
+[169362, 463, 9, 12],
+[169372, 463, 9, 22],
+[169382, 463, 10, 2],
+[169483, 464, 1, 11],
+[169660, 464, 7, 6],
+[169837, 464, 12, 30],
+[169937, 465, 4, 9],
+[170074, 465, 8, 24],
+[170180, 465, 12, 8],
+[170334, 466, 5, 11],
+[170490, 466, 10, 14],
+[170645, 467, 3, 18],
+[170829, 467, 9, 18],
+[171022, 468, 3, 29],
+[171059, 468, 5, 5],
+[171150, 468, 8, 4],
+[171202, 468, 9, 25],
+[171208, 468, 10, 1],
+[171347, 469, 2, 17],
+[171351, 469, 2, 21],
+[171419, 469, 4, 30],
+[171433, 469, 5, 14],
+[171553, 469, 9, 11],
+[171559, 469, 9, 17],
+[171562, 469, 9, 20],
+[171678, 470, 1, 14],
+[171798, 470, 5, 14],
+[171967, 470, 10, 30],
+[172141, 471, 4, 22],
+[172266, 471, 8, 25],
+[172386, 471, 12, 23],
+[172462, 472, 3, 8],
+[172600, 472, 7, 24],
+[172789, 473, 1, 29],
+[172870, 473, 4, 20],
+[172911, 473, 5, 31],
+[172972, 473, 7, 31],
+[173098, 473, 12, 4],
+[173258, 474, 5, 13],
+[173360, 474, 8, 23],
+[173486, 474, 12, 27],
+[173610, 475, 4, 30],
+[173687, 475, 7, 16],
+[173828, 475, 12, 4],
+[174024, 476, 6, 17],
+[174047, 476, 7, 10],
+[174064, 476, 7, 27],
+[174169, 476, 11, 9],
+[174214, 476, 12, 24],
+[174400, 477, 6, 28],
+[174437, 477, 8, 4],
+[174483, 477, 9, 19],
+[174613, 478, 1, 27],
+[174634, 478, 2, 17],
+[174816, 478, 8, 18],
+[174881, 478, 10, 22],
+[175045, 479, 4, 4],
+[175221, 479, 9, 27],
+[175252, 479, 10, 28],
+[175445, 480, 5, 8],
+[175517, 480, 7, 19],
+[175683, 481, 1, 1],
+[175780, 481, 4, 8],
+[175962, 481, 10, 7],
+[176002, 481, 11, 16],
+[176087, 482, 2, 9],
+[176146, 482, 4, 9],
+[176285, 482, 8, 26],
+[176481, 483, 3, 10],
+[176607, 483, 7, 14],
+[176636, 483, 8, 12],
+[176785, 484, 1, 8],
+[176880, 484, 4, 12],
+[177013, 484, 8, 23],
+[177210, 485, 3, 8],
+[177308, 485, 6, 14],
+[177504, 485, 12, 27],
+[177515, 486, 1, 7],
+[177562, 486, 2, 23],
+[177598, 486, 3, 31],
+[177723, 486, 8, 3],
+[177809, 486, 10, 28],
+[177961, 487, 3, 29],
+[178083, 487, 7, 29],
+[178241, 488, 1, 3],
+[178349, 488, 4, 20],
+[178387, 488, 5, 28],
+[178520, 488, 10, 8],
+[178591, 488, 12, 18],
+[178791, 489, 7, 6],
+[178857, 489, 9, 10],
+[179018, 490, 2, 18],
+[179113, 490, 5, 24],
+[179149, 490, 6, 29],
+[179163, 490, 7, 13],
+[179253, 490, 10, 11],
+[179390, 491, 2, 25],
+[179537, 491, 7, 22],
+[179716, 492, 1, 17],
+[179896, 492, 7, 15],
+[180079, 493, 1, 14],
+[180257, 493, 7, 11],
+[180358, 493, 10, 20],
+[180363, 493, 10, 25],
+[180509, 494, 3, 20],
+[180564, 494, 5, 14],
+[180753, 494, 11, 19],
+[180854, 495, 2, 28],
+[180965, 495, 6, 19],
+[181131, 495, 12, 2],
+[181264, 496, 4, 13],
+[181356, 496, 7, 14],
+[181469, 496, 11, 4],
+[181516, 496, 12, 21],
+[181570, 497, 2, 13],
+[181674, 497, 5, 28],
+[181761, 497, 8, 23],
+[181846, 497, 11, 16],
+[181905, 498, 1, 14],
+[182076, 498, 7, 4],
+[182185, 498, 10, 21],
+[182248, 498, 12, 23],
+[182313, 499, 2, 26],
+[182320, 499, 3, 5],
+[182496, 499, 8, 28],
+[182566, 499, 11, 6],
+[182745, 500, 5, 4],
+[182900, 500, 10, 6],
+[182914, 500, 10, 20],
+[182978, 500, 12, 23],
+[183149, 501, 6, 12],
+[183332, 501, 12, 12],
+[183482, 502, 5, 11],
+[183616, 502, 9, 22],
+[183793, 503, 3, 18],
+[183873, 503, 6, 6],
+[184014, 503, 10, 25],
+[184108, 504, 1, 27],
+[184250, 504, 6, 17],
+[184288, 504, 7, 25],
+[184411, 504, 11, 25],
+[184611, 505, 6, 13],
+[184737, 505, 10, 17],
+[184928, 506, 4, 26],
+[185097, 506, 10, 12],
+[185267, 507, 3, 31],
+[185323, 507, 5, 26],
+[185356, 507, 6, 28],
+[185539, 507, 12, 28],
+[185652, 508, 4, 19],
+[185781, 508, 8, 26],
+[185911, 509, 1, 3],
+[186005, 509, 4, 7],
+[186177, 509, 9, 26],
+[186256, 509, 12, 14],
+[186447, 510, 6, 23],
+[186563, 510, 10, 17],
+[186593, 510, 11, 16],
+[186729, 511, 4, 1],
+[186757, 511, 4, 29],
+[186913, 511, 10, 2],
+[187047, 512, 2, 13],
+[187184, 512, 6, 29],
+[187353, 512, 12, 15],
+[187460, 513, 4, 1],
+[187501, 513, 5, 12],
+[187610, 513, 8, 29],
+[187759, 514, 1, 25],
+[187911, 514, 6, 26],
+[187944, 514, 7, 29],
+[187960, 514, 8, 14],
+[188019, 514, 10, 12],
+[188080, 514, 12, 12],
+[188130, 515, 1, 31],
+[188153, 515, 2, 23],
+[188248, 515, 5, 29],
+[188439, 515, 12, 6],
+[188522, 516, 2, 27],
+[188528, 516, 3, 4],
+[188636, 516, 6, 20],
+[188694, 516, 8, 17],
+[188713, 516, 9, 5],
+[188899, 517, 3, 10],
+[188952, 517, 5, 2],
+[188957, 517, 5, 7],
+[188996, 517, 6, 15],
+[189106, 517, 10, 3],
+[189225, 518, 1, 30],
+[189284, 518, 3, 30],
+[189330, 518, 5, 15],
+[189402, 518, 7, 26],
+[189433, 518, 8, 26],
+[189625, 519, 3, 6],
+[189721, 519, 6, 10],
+[189847, 519, 10, 14],
+[190026, 520, 4, 10],
+[190091, 520, 6, 14],
+[190213, 520, 10, 14],
+[190318, 521, 1, 27],
+[190362, 521, 3, 12],
+[190545, 521, 9, 11],
+[190581, 521, 10, 17],
+[190690, 522, 2, 3],
+[190842, 522, 7, 5],
+[190889, 522, 8, 21],
+[191086, 523, 3, 6],
+[191206, 523, 7, 4],
+[191207, 523, 7, 5],
+[191283, 523, 9, 19],
+[191329, 523, 11, 4],
+[191404, 524, 1, 18],
+[191479, 524, 4, 2],
+[191624, 524, 8, 25],
+[191800, 525, 2, 17],
+[191842, 525, 3, 31],
+[191985, 525, 8, 21],
+[192184, 526, 3, 8],
+[192197, 526, 3, 21],
+[192371, 526, 9, 11],
+[192567, 527, 3, 26],
+[192707, 527, 8, 13],
+[192773, 527, 10, 18],
+[192935, 528, 3, 28],
+[193080, 528, 8, 20],
+[193093, 528, 9, 2],
+[193216, 529, 1, 3],
+[193385, 529, 6, 21],
+[193573, 529, 12, 26],
+[193722, 530, 5, 24],
+[193751, 530, 6, 22],
+[193880, 530, 10, 29],
+[194063, 531, 4, 30],
+[194110, 531, 6, 16],
+[194174, 531, 8, 19],
+[194280, 531, 12, 3],
+[194461, 532, 6, 1],
+[194574, 532, 9, 22],
+[194670, 532, 12, 27],
+[194737, 533, 3, 4],
+[194853, 533, 6, 28],
+[194875, 533, 7, 20],
+[194911, 533, 8, 25],
+[194978, 533, 10, 31],
+[195036, 533, 12, 28],
+[195098, 534, 2, 28],
+[195112, 534, 3, 14],
+[195242, 534, 7, 22],
+[195296, 534, 9, 14],
+[195365, 534, 11, 22],
+[195434, 535, 1, 30],
+[195521, 535, 4, 27],
+[195544, 535, 5, 20],
+[195601, 535, 7, 16],
+[195699, 535, 10, 22],
+[195721, 535, 11, 13],
+[195750, 535, 12, 12],
+[195785, 536, 1, 16],
+[195853, 536, 3, 24],
+[195994, 536, 8, 12],
+[196176, 537, 2, 10],
+[196294, 537, 6, 8],
+[196435, 537, 10, 27],
+[196620, 538, 4, 30],
+[196759, 538, 9, 16],
+[196774, 538, 10, 1],
+[196969, 539, 4, 14],
+[197036, 539, 6, 20],
+[197165, 539, 10, 27],
+[197263, 540, 2, 2],
+[197421, 540, 7, 9],
+[197527, 540, 10, 23],
+[197623, 541, 1, 27],
+[197750, 541, 6, 3],
+[197767, 541, 6, 20],
+[197786, 541, 7, 9],
+[197986, 542, 1, 25],
+[198133, 542, 6, 21],
+[198281, 542, 11, 16],
+[198449, 543, 5, 3],
+[198543, 543, 8, 5],
+[198599, 543, 9, 30],
+[198643, 543, 11, 13],
+[198791, 544, 4, 9],
+[198906, 544, 8, 2],
+[198957, 544, 9, 22],
+[198978, 544, 10, 13],
+[198995, 544, 10, 30],
+[199049, 544, 12, 23],
+[199082, 545, 1, 25],
+[199170, 545, 4, 23],
+[199307, 545, 9, 7],
+[199485, 546, 3, 4],
+[199512, 546, 3, 31],
+[199608, 546, 7, 5],
+[199748, 546, 11, 22],
+[199775, 546, 12, 19],
+[199848, 547, 3, 2],
+[199896, 547, 4, 19],
+[199969, 547, 7, 1],
+[200087, 547, 10, 27],
+[200201, 548, 2, 18],
+[200291, 548, 5, 18],
+[200425, 548, 9, 29],
+[200547, 549, 1, 29],
+[200601, 549, 3, 24],
+[200748, 549, 8, 18],
+[200776, 549, 9, 15],
+[200809, 549, 10, 18],
+[200837, 549, 11, 15],
+[201017, 550, 5, 14],
+[201023, 550, 5, 20],
+[201187, 550, 10, 31],
+[201277, 551, 1, 29],
+[201433, 551, 7, 4],
+[201526, 551, 10, 5],
+[201541, 551, 10, 20],
+[201658, 552, 2, 14],
+[201830, 552, 8, 4],
+[201986, 553, 1, 7],
+[202156, 553, 6, 26],
+[202352, 554, 1, 8],
+[202530, 554, 7, 5],
+[202550, 554, 7, 25],
+[202601, 554, 9, 14],
+[202662, 554, 11, 14],
+[202736, 555, 1, 27],
+[202898, 555, 7, 8],
+[202909, 555, 7, 19],
+[202989, 555, 10, 7],
+[203162, 556, 3, 28],
+[203204, 556, 5, 9],
+[203226, 556, 5, 31],
+[203346, 556, 9, 28],
+[203431, 556, 12, 22],
+[203594, 557, 6, 3],
+[203615, 557, 6, 24],
+[203803, 557, 12, 29],
+[203857, 558, 2, 21],
+[204012, 558, 7, 26],
+[204032, 558, 8, 15],
+[204107, 558, 10, 29],
+[204153, 558, 12, 14],
+[204236, 559, 3, 7],
+[204241, 559, 3, 12],
+[204367, 559, 7, 16],
+[204502, 559, 11, 28],
+[204503, 559, 11, 29],
+[204654, 560, 4, 28],
+[204813, 560, 10, 4],
+[204874, 560, 12, 4],
+[204913, 561, 1, 12],
+[204927, 561, 1, 26],
+[205101, 561, 7, 19],
+[205266, 561, 12, 31],
+[205283, 562, 1, 17],
+[205404, 562, 5, 18],
+[205550, 562, 10, 11],
+[205611, 562, 12, 11],
+[205795, 563, 6, 13],
+[205863, 563, 8, 20],
+[205884, 563, 9, 10],
+[205930, 563, 10, 26],
+[205936, 563, 11, 1],
+[206066, 564, 3, 10],
+[206205, 564, 7, 27],
+[206222, 564, 8, 13],
+[206277, 564, 10, 7],
+[206350, 564, 12, 19],
+[206521, 565, 6, 8],
+[206709, 565, 12, 13],
+[206898, 566, 6, 20],
+[207062, 566, 12, 1],
+[207092, 566, 12, 31],
+[207147, 567, 2, 24],
+[207197, 567, 4, 15],
+[207204, 567, 4, 22],
+[207355, 567, 9, 20],
+[207413, 567, 11, 17],
+[207515, 568, 2, 27],
+[207517, 568, 2, 29],
+[207674, 568, 8, 4],
+[207806, 568, 12, 14],
+[207846, 569, 1, 23],
+[207943, 569, 4, 30],
+[207975, 569, 6, 1],
+[208151, 569, 11, 24],
+[208233, 570, 2, 14],
+[208261, 570, 3, 14],
+[208360, 570, 6, 21],
+[208482, 570, 10, 21],
+[208496, 570, 11, 4],
+[208624, 571, 3, 12],
+[208771, 571, 8, 6],
+[208901, 571, 12, 14],
+[208926, 572, 1, 8],
+[208985, 572, 3, 7],
+[209172, 572, 9, 10],
+[209211, 572, 10, 19],
+[209396, 573, 4, 22],
+[209580, 573, 10, 23],
+[209680, 574, 1, 31],
+[209751, 574, 4, 12],
+[209884, 574, 8, 23],
+[210029, 575, 1, 15],
+[210150, 575, 5, 16],
+[210173, 575, 6, 8],
+[210182, 575, 6, 17],
+[210291, 575, 10, 4],
+[210337, 575, 11, 19],
+[210469, 576, 3, 30],
+[210637, 576, 9, 14],
+[210696, 576, 11, 12],
+[210878, 577, 5, 13],
+[210881, 577, 5, 16],
+[210950, 577, 7, 24],
+[210975, 577, 8, 18],
+[211030, 577, 10, 12],
+[211061, 577, 11, 12],
+[211256, 578, 5, 26],
+[211318, 578, 7, 27],
+[211369, 578, 9, 16],
+[211542, 579, 3, 8],
+[211590, 579, 4, 25],
+[211732, 579, 9, 14],
+[211758, 579, 10, 10],
+[211843, 580, 1, 3],
+[211992, 580, 5, 31],
+[212100, 580, 9, 16],
+[212155, 580, 11, 10],
+[212203, 580, 12, 28],
+[212397, 581, 7, 10],
+[212438, 581, 8, 20],
+[212562, 581, 12, 22],
+[212611, 582, 2, 9],
+[212715, 582, 5, 24],
+[212765, 582, 7, 13],
+[212828, 582, 9, 14],
+[212880, 582, 11, 5],
+[212894, 582, 11, 19],
+[213041, 583, 4, 15],
+[213047, 583, 4, 21],
+[213082, 583, 5, 26],
+[213126, 583, 7, 9],
+[213164, 583, 8, 16],
+[213174, 583, 8, 26],
+[213372, 584, 3, 11],
+[213537, 584, 8, 23],
+[213737, 585, 3, 11],
+[213848, 585, 6, 30],
+[214033, 586, 1, 1],
+[214115, 586, 3, 24],
+[214118, 586, 3, 27],
+[214158, 586, 5, 6],
+[214202, 586, 6, 19],
+[214285, 586, 9, 10],
+[214324, 586, 10, 19],
+[214360, 586, 11, 24],
+[214474, 587, 3, 18],
+[214552, 587, 6, 4],
+[214750, 587, 12, 19],
+[214877, 588, 4, 24],
+[215036, 588, 9, 30],
+[215082, 588, 11, 15],
+[215229, 589, 4, 11],
+[215241, 589, 4, 23],
+[215433, 589, 11, 1],
+[215454, 589, 11, 22],
+[215499, 590, 1, 6],
+[215625, 590, 5, 12],
+[215744, 590, 9, 8],
+[215815, 590, 11, 18],
+[215979, 591, 5, 1],
+[216083, 591, 8, 13],
+[216252, 592, 1, 29],
+[216316, 592, 4, 2],
+[216358, 592, 5, 14],
+[216491, 592, 9, 24],
+[216568, 592, 12, 10],
+[216702, 593, 4, 23],
+[216847, 593, 9, 15],
+[216858, 593, 9, 26],
+[216884, 593, 10, 22],
+[217064, 594, 4, 20],
+[217104, 594, 5, 30],
+[217220, 594, 9, 23],
+[217299, 594, 12, 11],
+[217491, 595, 6, 21],
+[217498, 595, 6, 28],
+[217502, 595, 7, 2],
+[217657, 595, 12, 4],
+[217825, 596, 5, 20],
+[218012, 596, 11, 23],
+[218157, 597, 4, 17],
+[218199, 597, 5, 29],
+[218366, 597, 11, 12],
+[218405, 597, 12, 21],
+[218439, 598, 1, 24],
+[218474, 598, 2, 28],
+[218514, 598, 4, 9],
+[218538, 598, 5, 3],
+[218603, 598, 7, 7],
+[218625, 598, 7, 29],
+[218711, 598, 10, 23],
+[218803, 599, 1, 23],
+[218871, 599, 4, 1],
+[219071, 599, 10, 18],
+[219207, 600, 3, 3],
+[219243, 600, 4, 8],
+[219356, 600, 7, 30],
+[219379, 600, 8, 22],
+[219476, 600, 11, 27],
+[219493, 600, 12, 14],
+[219675, 601, 6, 14],
+[219844, 601, 11, 30],
+[220040, 602, 6, 14],
+[220136, 602, 9, 18],
+[220158, 602, 10, 10],
+[220296, 603, 2, 25],
+[220450, 603, 7, 29],
+[220506, 603, 9, 23],
+[220530, 603, 10, 17],
+[220633, 604, 1, 28],
+[220638, 604, 2, 2],
+[220715, 604, 4, 19],
+[220808, 604, 7, 21],
+[220820, 604, 8, 2],
+[220860, 604, 9, 11],
+[220891, 604, 10, 12],
+[221030, 605, 2, 28],
+[221145, 605, 6, 23],
+[221339, 606, 1, 3],
+[221366, 606, 1, 30],
+[221478, 606, 5, 22],
+[221612, 606, 10, 3],
+[221726, 607, 1, 25],
+[221876, 607, 6, 24],
+[222020, 607, 11, 15],
+[222091, 608, 1, 25],
+[222167, 608, 4, 10],
+[222224, 608, 6, 6],
+[222380, 608, 11, 9],
+[222484, 609, 2, 21],
+[222644, 609, 7, 31],
+[222802, 610, 1, 5],
+[222883, 610, 3, 27],
+[223045, 610, 9, 5],
+[223120, 610, 11, 19],
+[223171, 611, 1, 9],
+[223228, 611, 3, 7],
+[223324, 611, 6, 11],
+[223362, 611, 7, 19],
+[223427, 611, 9, 22],
+[223444, 611, 10, 9],
+[223619, 612, 4, 1],
+[223637, 612, 4, 19],
+[223672, 612, 5, 24],
+[223720, 612, 7, 11],
+[223876, 612, 12, 14],
+[223943, 613, 2, 19],
+[223975, 613, 3, 23],
+[224077, 613, 7, 3],
+[224248, 613, 12, 21],
+[224427, 614, 6, 18],
+[224615, 614, 12, 23],
+[224797, 615, 6, 23],
+[224841, 615, 8, 6],
+[224890, 615, 9, 24],
+[225053, 616, 3, 5],
+[225242, 616, 9, 10],
+[225273, 616, 10, 11],
+[225299, 616, 11, 6],
+[225409, 617, 2, 24],
+[225557, 617, 7, 22],
+[225590, 617, 8, 24],
+[225625, 617, 9, 28],
+[225666, 617, 11, 8],
+[225825, 618, 4, 16],
+[225859, 618, 5, 20],
+[225973, 618, 9, 11],
+[226097, 619, 1, 13],
+[226216, 619, 5, 12],
+[226380, 619, 10, 23],
+[226473, 620, 1, 24],
+[226506, 620, 2, 26],
+[226562, 620, 4, 22],
+[226577, 620, 5, 7],
+[226663, 620, 8, 1],
+[226859, 621, 2, 13],
+[226959, 621, 5, 24],
+[227154, 621, 12, 5],
+[227183, 622, 1, 3],
+[227251, 622, 3, 12],
+[227273, 622, 4, 3],
+[227364, 622, 7, 3],
+[227488, 622, 11, 4],
+[227578, 623, 2, 2],
+[227594, 623, 2, 18],
+[227691, 623, 5, 26],
+[227705, 623, 6, 9],
+[227813, 623, 9, 25],
+[227957, 624, 2, 16],
+[228052, 624, 5, 21],
+[228125, 624, 8, 2],
+[228226, 624, 11, 11],
+[228231, 624, 11, 16],
+[228384, 625, 4, 18],
+[228532, 625, 9, 13],
+[228715, 626, 3, 15],
+[228898, 626, 9, 14],
+[229047, 627, 2, 10],
+[229153, 627, 5, 27],
+[229284, 627, 10, 5],
+[229432, 628, 3, 1],
+[229559, 628, 7, 6],
+[229742, 629, 1, 5],
+[229930, 629, 7, 12],
+[230041, 629, 10, 31],
+[230074, 629, 12, 3],
+[230163, 630, 3, 2],
+[230299, 630, 7, 16],
+[230394, 630, 10, 19],
+[230590, 631, 5, 3],
+[230693, 631, 8, 14],
+[230736, 631, 9, 26],
+[230908, 632, 3, 16],
+[231021, 632, 7, 7],
+[231141, 632, 11, 4],
+[231178, 632, 12, 11],
+[231312, 633, 4, 24],
+[231330, 633, 5, 12],
+[231349, 633, 5, 31],
+[231536, 633, 12, 4],
+[231672, 634, 4, 19],
+[231813, 634, 9, 7],
+[231980, 635, 2, 21],
+[232112, 635, 7, 3],
+[232119, 635, 7, 10],
+[232130, 635, 7, 21],
+[232175, 635, 9, 4],
+[232320, 636, 1, 27],
+[232334, 636, 2, 10],
+[232338, 636, 2, 14],
+[232518, 636, 8, 12],
+[232567, 636, 9, 30],
+[232656, 636, 12, 28],
+[232798, 637, 5, 19],
+[232906, 637, 9, 4],
+[233081, 638, 2, 26],
+[233211, 638, 7, 6],
+[233391, 639, 1, 2],
+[233542, 639, 6, 2],
+[233639, 639, 9, 7],
+[233815, 640, 3, 1],
+[233941, 640, 7, 5],
+[234130, 641, 1, 10],
+[234214, 641, 4, 4],
+[234249, 641, 5, 9],
+[234270, 641, 5, 30],
+[234291, 641, 6, 20],
+[234455, 641, 12, 1],
+[234504, 642, 1, 19],
+[234536, 642, 2, 20],
+[234674, 642, 7, 8],
+[234852, 643, 1, 2],
+[234955, 643, 4, 15],
+[235132, 643, 10, 9],
+[235206, 643, 12, 22],
+[235302, 644, 3, 27],
+[235479, 644, 9, 20],
+[235563, 644, 12, 13],
+[235584, 645, 1, 3],
+[235760, 645, 6, 28],
+[235781, 645, 7, 19],
+[235891, 645, 11, 6],
+[235900, 645, 11, 15],
+[236028, 646, 3, 23],
+[236050, 646, 4, 14],
+[236152, 646, 7, 25],
+[236275, 646, 11, 25],
+[236331, 647, 1, 20],
+[236373, 647, 3, 3],
+[236567, 647, 9, 13],
+[236596, 647, 10, 12],
+[236760, 648, 3, 24],
+[236829, 648, 6, 1],
+[236857, 648, 6, 29],
+[237048, 649, 1, 6],
+[237241, 649, 7, 18],
+[237304, 649, 9, 19],
+[237463, 650, 2, 25],
+[237615, 650, 7, 27],
+[237768, 650, 12, 27],
+[237889, 651, 4, 27],
+[237977, 651, 7, 24],
+[238082, 651, 11, 6],
+[238153, 652, 1, 16],
+[238295, 652, 6, 6],
+[238338, 652, 7, 19],
+[238535, 653, 2, 1],
+[238578, 653, 3, 16],
+[238673, 653, 6, 19],
+[238694, 653, 7, 10],
+[238784, 653, 10, 8],
+[238915, 654, 2, 16],
+[239102, 654, 8, 22],
+[239157, 654, 10, 16],
+[239338, 655, 4, 15],
+[239425, 655, 7, 11],
+[239604, 656, 1, 6],
+[239768, 656, 6, 18],
+[239776, 656, 6, 26],
+[239888, 656, 10, 16],
+[239890, 656, 10, 18],
+[240084, 657, 4, 30],
+[240220, 657, 9, 13],
+[240375, 658, 2, 15],
+[240379, 658, 2, 19],
+[240473, 658, 5, 24],
+[240562, 658, 8, 21],
+[240591, 658, 9, 19],
+[240638, 658, 11, 5],
+[240803, 659, 4, 19],
+[240891, 659, 7, 16],
+[241060, 660, 1, 1],
+[241100, 660, 2, 10],
+[241199, 660, 5, 19],
+[241366, 660, 11, 2],
+[241510, 661, 3, 26],
+[241563, 661, 5, 18],
+[241663, 661, 8, 26],
+[241784, 661, 12, 25],
+[241790, 661, 12, 31],
+[241857, 662, 3, 8],
+[241915, 662, 5, 5],
+[242028, 662, 8, 26],
+[242087, 662, 10, 24],
+[242249, 663, 4, 4],
+[242431, 663, 10, 3],
+[242605, 664, 3, 25],
+[242775, 664, 9, 11],
+[242953, 665, 3, 8],
+[243056, 665, 6, 19],
+[243206, 665, 11, 16],
+[243218, 665, 11, 28],
+[243275, 666, 1, 24],
+[243321, 666, 3, 11],
+[243480, 666, 8, 17],
+[243666, 667, 2, 19],
+[243708, 667, 4, 2],
+[243766, 667, 5, 30],
+[243785, 667, 6, 18],
+[243887, 667, 9, 28],
+[243953, 667, 12, 3],
+[243971, 667, 12, 21],
+[243981, 667, 12, 31],
+[244144, 668, 6, 11],
+[244249, 668, 9, 24],
+[244445, 669, 4, 8],
+[244605, 669, 9, 15],
+[244691, 669, 12, 10],
+[244869, 670, 6, 6],
+[244904, 670, 7, 11],
+[245001, 670, 10, 16],
+[245084, 671, 1, 7],
+[245252, 671, 6, 24],
+[245332, 671, 9, 12],
+[245353, 671, 10, 3],
+[245475, 672, 2, 2],
+[245599, 672, 6, 5],
+[245769, 672, 11, 22],
+[245924, 673, 4, 26],
+[246070, 673, 9, 19],
+[246086, 673, 10, 5],
+[246260, 674, 3, 28],
+[246383, 674, 7, 29],
+[246573, 675, 2, 4],
+[246650, 675, 4, 22],
+[246733, 675, 7, 14],
+[246743, 675, 7, 24],
+[246891, 675, 12, 19],
+[246929, 676, 1, 26],
+[247016, 676, 4, 22],
+[247086, 676, 7, 1],
+[247126, 676, 8, 10],
+[247225, 676, 11, 17],
+[247364, 677, 4, 5],
+[247393, 677, 5, 4],
+[247446, 677, 6, 26],
+[247513, 677, 9, 1],
+[247520, 677, 9, 8],
+[247711, 678, 3, 18],
+[247822, 678, 7, 7],
+[247916, 678, 10, 9],
+[248050, 679, 2, 20],
+[248072, 679, 3, 14],
+[248087, 679, 3, 29],
+[248209, 679, 7, 29],
+[248373, 680, 1, 9],
+[248567, 680, 7, 21],
+[248599, 680, 8, 22],
+[248725, 680, 12, 26],
+[248789, 681, 2, 28],
+[248834, 681, 4, 14],
+[248845, 681, 4, 25],
+[248994, 681, 9, 21],
+[249010, 681, 10, 7],
+[249139, 682, 2, 13],
+[249187, 682, 4, 2],
+[249372, 682, 10, 4],
+[249376, 682, 10, 8],
+[249551, 683, 4, 1],
+[249674, 683, 8, 2],
+[249680, 683, 8, 8],
+[249707, 683, 9, 4],
+[249812, 683, 12, 18],
+[249999, 684, 6, 22],
+[250155, 684, 11, 25],
+[250311, 685, 4, 30],
+[250499, 685, 11, 4],
+[250670, 686, 4, 24],
+[250848, 686, 10, 19],
+[250898, 686, 12, 8],
+[250937, 687, 1, 16],
+[250973, 687, 2, 21],
+[251003, 687, 3, 23],
+[251193, 687, 9, 29],
+[251364, 688, 3, 18],
+[251473, 688, 7, 5],
+[251525, 688, 8, 26],
+[251535, 688, 9, 5],
+[251636, 688, 12, 15],
+[251667, 689, 1, 15],
+[251822, 689, 6, 19],
+[251844, 689, 7, 11],
+[251954, 689, 10, 29],
+[252034, 690, 1, 17],
+[252051, 690, 2, 3],
+[252162, 690, 5, 25],
+[252189, 690, 6, 21],
+[252236, 690, 8, 7],
+[252414, 691, 2, 1],
+[252509, 691, 5, 7],
+[252520, 691, 5, 18],
+[252658, 691, 10, 3],
+[252664, 691, 10, 9],
+[252679, 691, 10, 24],
+[252780, 692, 2, 2],
+[252836, 692, 3, 29],
+[252912, 692, 6, 13],
+[253089, 692, 12, 7],
+[253132, 693, 1, 19],
+[253308, 693, 7, 14],
+[253445, 693, 11, 28],
+[253446, 693, 11, 29],
+[253464, 693, 12, 17],
+[253577, 694, 4, 9],
+[253631, 694, 6, 2],
+[253774, 694, 10, 23],
+[253963, 695, 4, 30],
+[254105, 695, 9, 19],
+[254151, 695, 11, 4],
+[254224, 696, 1, 16],
+[254247, 696, 2, 8],
+[254310, 696, 4, 11],
+[254445, 696, 8, 24],
+[254607, 697, 2, 2],
+[254632, 697, 2, 27],
+[254826, 697, 9, 9],
+[254857, 697, 10, 10],
+[255010, 698, 3, 12],
+[255198, 698, 9, 16],
+[255226, 698, 10, 14],
+[255281, 698, 12, 8],
+[255443, 699, 5, 19],
+[255466, 699, 6, 11],
+[255589, 699, 10, 12],
+[255647, 699, 12, 9],
+[255758, 700, 3, 30],
+[255958, 700, 10, 16],
+[255985, 700, 11, 12],
+[256185, 701, 5, 31],
+[256186, 701, 6, 1],
+[256335, 701, 10, 28],
+[256388, 701, 12, 20],
+[256466, 702, 3, 8],
+[256581, 702, 7, 1],
+[256601, 702, 7, 21],
+[256791, 703, 1, 27],
+[256975, 703, 7, 30],
+[256985, 703, 8, 9],
+[257133, 704, 1, 4],
+[257224, 704, 4, 4],
+[257381, 704, 9, 8],
+[257492, 704, 12, 28],
+[257541, 705, 2, 15],
+[257628, 705, 5, 13],
+[257711, 705, 8, 4],
+[257819, 705, 11, 20],
+[257910, 706, 2, 19],
+[258056, 706, 7, 15],
+[258188, 706, 11, 24],
+[258262, 707, 2, 6],
+[258306, 707, 3, 22],
+[258349, 707, 5, 4],
+[258535, 707, 11, 6],
+[258544, 707, 11, 15],
+[258554, 707, 11, 25],
+[258635, 708, 2, 14],
+[258656, 708, 3, 6],
+[258748, 708, 6, 6],
+[258880, 708, 10, 16],
+[258979, 709, 1, 23],
+[259071, 709, 4, 25],
+[259112, 709, 6, 5],
+[259301, 709, 12, 11],
+[259309, 709, 12, 19],
+[259490, 710, 6, 18],
+[259584, 710, 9, 20],
+[259689, 711, 1, 3],
+[259887, 711, 7, 20],
+[259970, 711, 10, 11],
+[260145, 712, 4, 3],
+[260340, 712, 10, 15],
+[260408, 712, 12, 22],
+[260477, 713, 3, 1],
+[260608, 713, 7, 10],
+[260703, 713, 10, 13],
+[260888, 714, 4, 16],
+[260949, 714, 6, 16],
+[260956, 714, 6, 23],
+[261027, 714, 9, 2],
+[261108, 714, 11, 22],
+[261297, 715, 5, 30],
+[261460, 715, 11, 9],
+[261654, 716, 5, 21],
+[261672, 716, 6, 8],
+[261774, 716, 9, 18],
+[261919, 717, 2, 10],
+[262069, 717, 7, 10],
+[262263, 718, 1, 20],
+[262395, 718, 6, 1],
+[262534, 718, 10, 18],
+[262590, 718, 12, 13],
+[262750, 719, 5, 22],
+[262779, 719, 6, 20],
+[262954, 719, 12, 12],
+[263036, 720, 3, 3],
+[263072, 720, 4, 8],
+[263198, 720, 8, 12],
+[263303, 720, 11, 25],
+[263361, 721, 1, 22],
+[263362, 721, 1, 23],
+[263552, 721, 8, 1],
+[263746, 722, 2, 11],
+[263890, 722, 7, 5],
+[264078, 723, 1, 9],
+[264254, 723, 7, 4],
+[264314, 723, 9, 2],
+[264508, 724, 3, 14],
+[264673, 724, 8, 26],
+[264830, 725, 1, 30],
+[264910, 725, 4, 20],
+[264941, 725, 5, 21],
+[265038, 725, 8, 26],
+[265203, 726, 2, 7],
+[265308, 726, 5, 23],
+[265416, 726, 9, 8],
+[265542, 727, 1, 12],
+[265659, 727, 5, 9],
+[265759, 727, 8, 17],
+[265883, 727, 12, 19],
+[266018, 728, 5, 2],
+[266030, 728, 5, 14],
+[266132, 728, 8, 24],
+[266177, 728, 10, 8],
+[266237, 728, 12, 7],
+[266307, 729, 2, 15],
+[266483, 729, 8, 10],
+[266501, 729, 8, 28],
+[266512, 729, 9, 8],
+[266605, 729, 12, 10],
+[266634, 730, 1, 8],
+[266756, 730, 5, 10],
+[266867, 730, 8, 29],
+[267036, 731, 2, 14],
+[267139, 731, 5, 28],
+[267287, 731, 10, 23],
+[267332, 731, 12, 7],
+[267418, 732, 3, 2],
+[267613, 732, 9, 13],
+[267756, 733, 2, 3],
+[267829, 733, 4, 17],
+[267834, 733, 4, 22],
+[267914, 733, 7, 11],
+[268059, 733, 12, 3],
+[268198, 734, 4, 21],
+[268240, 734, 6, 2],
+[268293, 734, 7, 25],
+[268320, 734, 8, 21],
+[268433, 734, 12, 12],
+[268459, 735, 1, 7],
+[268537, 735, 3, 26],
+[268648, 735, 7, 15],
+[268756, 735, 10, 31],
+[268801, 735, 12, 15],
+[268805, 735, 12, 19],
+[268998, 736, 6, 29],
+[269162, 736, 12, 10],
+[269292, 737, 4, 19],
+[269387, 737, 7, 23],
+[269466, 737, 10, 10],
+[269513, 737, 11, 26],
+[269657, 738, 4, 19],
+[269796, 738, 9, 5],
+[269960, 739, 2, 16],
+[270156, 739, 8, 31],
+[270251, 739, 12, 4],
+[270276, 739, 12, 29],
+[270380, 740, 4, 11],
+[270473, 740, 7, 13],
+[270614, 740, 12, 1],
+[270724, 741, 3, 21],
+[270807, 741, 6, 12],
+[270881, 741, 8, 25],
+[271014, 742, 1, 5],
+[271027, 742, 1, 18],
+[271058, 742, 2, 18],
+[271119, 742, 4, 20],
+[271206, 742, 7, 16],
+[271358, 742, 12, 15],
+[271496, 743, 5, 2],
+[271681, 743, 11, 3],
+[271803, 744, 3, 4],
+[271929, 744, 7, 8],
+[272071, 744, 11, 27],
+[272175, 745, 3, 11],
+[272257, 745, 6, 1],
+[272419, 745, 11, 10],
+[272491, 746, 1, 21],
+[272588, 746, 4, 28],
+[272711, 746, 8, 29],
+[272738, 746, 9, 25],
+[272758, 746, 10, 15],
+[272927, 747, 4, 2],
+[273076, 747, 8, 29],
+[273258, 748, 2, 27],
+[273379, 748, 6, 27],
+[273459, 748, 9, 15],
+[273636, 749, 3, 11],
+[273756, 749, 7, 9],
+[273829, 749, 9, 20],
+[274000, 750, 3, 10],
+[274146, 750, 8, 3],
+[274148, 750, 8, 5],
+[274235, 750, 10, 31],
+[274368, 751, 3, 13],
+[274393, 751, 4, 7],
+[274574, 751, 10, 5],
+[274667, 752, 1, 6],
+[274736, 752, 3, 15],
+[274784, 752, 5, 2],
+[274934, 752, 9, 29],
+[274980, 752, 11, 14],
+[275006, 752, 12, 10],
+[275030, 753, 1, 3],
+[275202, 753, 6, 24],
+[275333, 753, 11, 2],
+[275433, 754, 2, 10],
+[275607, 754, 8, 3],
+[275774, 755, 1, 17],
+[275837, 755, 3, 21],
+[275843, 755, 3, 27],
+[276018, 755, 9, 18],
+[276165, 756, 2, 12],
+[276226, 756, 4, 13],
+[276397, 756, 10, 1],
+[276526, 757, 2, 7],
+[276698, 757, 7, 29],
+[276878, 758, 1, 25],
+[276911, 758, 2, 27],
+[277080, 758, 8, 15],
+[277280, 759, 3, 3],
+[277325, 759, 4, 17],
+[277432, 759, 8, 2],
+[277583, 759, 12, 31],
+[277723, 760, 5, 19],
+[277895, 760, 11, 7],
+[277962, 761, 1, 13],
+[277974, 761, 1, 25],
+[278109, 761, 6, 9],
+[278257, 761, 11, 4],
+[278313, 761, 12, 30],
+[278363, 762, 2, 18],
+[278533, 762, 8, 7],
+[278590, 762, 10, 3],
+[278784, 763, 4, 15],
+[278927, 763, 9, 5],
+[279125, 764, 3, 21],
+[279254, 764, 7, 28],
+[279321, 764, 10, 3],
+[279471, 765, 3, 2],
+[279641, 765, 8, 19],
+[279841, 766, 3, 7],
+[279975, 766, 7, 19],
+[279992, 766, 8, 5],
+[280138, 766, 12, 29],
+[280183, 767, 2, 12],
+[280358, 767, 8, 6],
+[280412, 767, 9, 29],
+[280467, 767, 11, 23],
+[280622, 768, 4, 26],
+[280716, 768, 7, 29],
+[280914, 769, 2, 12],
+[281027, 769, 6, 5],
+[281110, 769, 8, 27],
+[281186, 769, 11, 11],
+[281299, 770, 3, 4],
+[281353, 770, 4, 27],
+[281384, 770, 5, 28],
+[281466, 770, 8, 18],
+[281643, 771, 2, 11],
+[281666, 771, 3, 6],
+[281739, 771, 5, 18],
+[281756, 771, 6, 4],
+[281822, 771, 8, 9],
+[281865, 771, 9, 21],
+[281873, 771, 9, 29],
+[281915, 771, 11, 10],
+[281931, 771, 11, 26],
+[281989, 772, 1, 23],
+[282160, 772, 7, 12],
+[282242, 772, 10, 2],
+[282396, 773, 3, 5],
+[282481, 773, 5, 29],
+[282585, 773, 9, 10],
+[282746, 774, 2, 18],
+[282924, 774, 8, 15],
+[283005, 774, 11, 4],
+[283146, 775, 3, 25],
+[283235, 775, 6, 22],
+[283363, 775, 10, 28],
+[283460, 776, 2, 2],
+[283562, 776, 5, 14],
+[283645, 776, 8, 5],
+[283696, 776, 9, 25],
+[283827, 777, 2, 3],
+[283998, 777, 7, 24],
+[284129, 777, 12, 2],
+[284156, 777, 12, 29],
+[284326, 778, 6, 17],
+[284394, 778, 8, 24],
+[284474, 778, 11, 12],
+[284615, 779, 4, 2],
+[284641, 779, 4, 28],
+[284644, 779, 5, 1],
+[284801, 779, 10, 5],
+[284949, 780, 3, 1],
+[285065, 780, 6, 25],
+[285197, 780, 11, 4],
+[285234, 780, 12, 11],
+[285399, 781, 5, 25],
+[285400, 781, 5, 26],
+[285444, 781, 7, 9],
+[285640, 782, 1, 21],
+[285686, 782, 3, 8],
+[285862, 782, 8, 31],
+[286005, 783, 1, 21],
+[286107, 783, 5, 3],
+[286117, 783, 5, 13],
+[286130, 783, 5, 26],
+[286226, 783, 8, 30],
+[286250, 783, 9, 23],
+[286392, 784, 2, 12],
+[286525, 784, 6, 24],
+[286713, 784, 12, 29],
+[286746, 785, 1, 31],
+[286819, 785, 4, 14],
+[286830, 785, 4, 25],
+[286948, 785, 8, 21],
+[287106, 786, 1, 26],
+[287219, 786, 5, 19],
+[287227, 786, 5, 27],
+[287359, 786, 10, 6],
+[287401, 786, 11, 17],
+[287485, 787, 2, 9],
+[287643, 787, 7, 17],
+[287759, 787, 11, 10],
+[287819, 788, 1, 9],
+[287991, 788, 6, 29],
+[288064, 788, 9, 10],
+[288191, 789, 1, 15],
+[288352, 789, 6, 25],
+[288517, 789, 12, 7],
+[288685, 790, 5, 24],
+[288808, 790, 9, 24],
+[288854, 790, 11, 9],
+[288868, 790, 11, 23],
+[288965, 791, 2, 28],
+[289163, 791, 9, 14],
+[289279, 792, 1, 8],
+[289307, 792, 2, 5],
+[289444, 792, 6, 21],
+[289540, 792, 9, 25],
+[289579, 792, 11, 3],
+[289708, 793, 3, 12],
+[289711, 793, 3, 15],
+[289733, 793, 4, 6],
+[289870, 793, 8, 21],
+[289983, 793, 12, 12],
+[290158, 794, 6, 5],
+[290356, 794, 12, 20],
+[290511, 795, 5, 24],
+[290609, 795, 8, 30],
+[290641, 795, 10, 1],
+[290715, 795, 12, 14],
+[290905, 796, 6, 21],
+[291014, 796, 10, 8],
+[291101, 797, 1, 3],
+[291158, 797, 3, 1],
+[291187, 797, 3, 30],
+[291349, 797, 9, 8],
+[291410, 797, 11, 8],
+[291455, 797, 12, 23],
+[291623, 798, 6, 9],
+[291657, 798, 7, 13],
+[291687, 798, 8, 12],
+[291769, 798, 11, 2],
+[291808, 798, 12, 11],
+[291943, 799, 4, 25],
+[291974, 799, 5, 26],
+[292076, 799, 9, 5],
+[292242, 800, 2, 18],
+[292272, 800, 3, 19],
+[292348, 800, 6, 3],
+[292416, 800, 8, 10],
+[292581, 801, 1, 22],
+[292647, 801, 3, 29],
+[292782, 801, 8, 11],
+[292825, 801, 9, 23],
+[292868, 801, 11, 5],
+[292887, 801, 11, 24],
+[292970, 802, 2, 15],
+[293001, 802, 3, 18],
+[293131, 802, 7, 26],
+[293229, 802, 11, 1],
+[293285, 802, 12, 27],
+[293332, 803, 2, 12],
+[293391, 803, 4, 12],
+[293407, 803, 4, 28],
+[293457, 803, 6, 17],
+[293633, 803, 12, 10],
+[293740, 804, 3, 26],
+[293892, 804, 8, 25],
+[293893, 804, 8, 26],
+[293896, 804, 8, 29],
+[293948, 804, 10, 20],
+[294038, 805, 1, 18],
+[294158, 805, 5, 18],
+[294310, 805, 10, 17],
+[294433, 806, 2, 17],
+[294603, 806, 8, 6],
+[294678, 806, 10, 20],
+[294756, 807, 1, 6],
+[294885, 807, 5, 15],
+[294978, 807, 8, 16],
+[295003, 807, 9, 10],
+[295198, 808, 3, 23],
+[295344, 808, 8, 16],
+[295466, 808, 12, 16],
+[295646, 809, 6, 14],
+[295829, 809, 12, 14],
+[295911, 810, 3, 6],
+[295953, 810, 4, 17],
+[296052, 810, 7, 25],
+[296225, 811, 1, 14],
+[296312, 811, 4, 11],
+[296455, 811, 9, 1],
+[296521, 811, 11, 6],
+[296700, 812, 5, 3],
+[296866, 812, 10, 16],
+[296892, 812, 11, 11],
+[296983, 813, 2, 10],
+[297158, 813, 8, 4],
+[297259, 813, 11, 13],
+[297407, 814, 4, 10],
+[297426, 814, 4, 29],
+[297620, 814, 11, 9],
+[297625, 814, 11, 14],
+[297814, 815, 5, 22],
+[297938, 815, 9, 23],
+[298079, 816, 2, 11],
+[298204, 816, 6, 15],
+[298277, 816, 8, 27],
+[298408, 817, 1, 5],
+[298510, 817, 4, 17],
+[298656, 817, 9, 10],
+[298840, 818, 3, 13],
+[298876, 818, 4, 18],
+[298993, 818, 8, 13],
+[299145, 819, 1, 12],
+[299245, 819, 4, 22],
+[299389, 819, 9, 13],
+[299564, 820, 3, 6],
+[299595, 820, 4, 6],
+[299623, 820, 5, 4],
+[299742, 820, 8, 31],
+[299911, 821, 2, 16],
+[300100, 821, 8, 24],
+[300224, 821, 12, 26],
+[300395, 822, 6, 15],
+[300487, 822, 9, 15],
+[300546, 822, 11, 13],
+[300697, 823, 4, 13],
+[300753, 823, 6, 8],
+[300819, 823, 8, 13],
+[301017, 824, 2, 27],
+[301102, 824, 5, 22],
+[301259, 824, 10, 26],
+[301352, 825, 1, 27],
+[301426, 825, 4, 11],
+[301500, 825, 6, 24],
+[301612, 825, 10, 14],
+[301639, 825, 11, 10],
+[301667, 825, 12, 8],
+[301683, 825, 12, 24],
+[301870, 826, 6, 29],
+[301901, 826, 7, 30],
+[301986, 826, 10, 23],
+[302035, 826, 12, 11],
+[302100, 827, 2, 14],
+[302201, 827, 5, 26],
+[302333, 827, 10, 5],
+[302500, 828, 3, 20],
+[302666, 828, 9, 2],
+[302712, 828, 10, 18],
+[302811, 829, 1, 25],
+[302889, 829, 4, 13],
+[302986, 829, 7, 19],
+[303163, 830, 1, 12],
+[303313, 830, 6, 11],
+[303471, 830, 11, 16],
+[303510, 830, 12, 25],
+[303536, 831, 1, 20],
+[303712, 831, 7, 15],
+[303852, 831, 12, 2],
+[303953, 832, 3, 12],
+[304021, 832, 5, 19],
+[304060, 832, 6, 27],
+[304085, 832, 7, 22],
+[304164, 832, 10, 9],
+[304299, 833, 2, 21],
+[304336, 833, 3, 30],
+[304367, 833, 4, 30],
+[304447, 833, 7, 19],
+[304585, 833, 12, 4],
+[304624, 834, 1, 12],
+[304789, 834, 6, 26],
+[304959, 834, 12, 13],
+[305001, 835, 1, 24],
+[305191, 835, 8, 2],
+[305228, 835, 9, 8],
+[305402, 836, 2, 29],
+[305451, 836, 4, 18],
+[305503, 836, 6, 9],
+[305554, 836, 7, 30],
+[305563, 836, 8, 8],
+[305618, 836, 10, 2],
+[305652, 836, 11, 5],
+[305680, 836, 12, 3],
+[305719, 837, 1, 11],
+[305751, 837, 2, 12],
+[305799, 837, 4, 1],
+[305898, 837, 7, 9],
+[306069, 837, 12, 27],
+[306113, 838, 2, 9],
+[306236, 838, 6, 12],
+[306292, 838, 8, 7],
+[306464, 839, 1, 26],
+[306555, 839, 4, 27],
+[306568, 839, 5, 10],
+[306669, 839, 8, 19],
+[306845, 840, 2, 11],
+[307042, 840, 8, 26],
+[307225, 841, 2, 25],
+[307354, 841, 7, 4],
+[307361, 841, 7, 11],
+[307422, 841, 9, 10],
+[307542, 842, 1, 8],
+[307705, 842, 6, 20],
+[307887, 842, 12, 19],
+[307933, 843, 2, 3],
+[308063, 843, 6, 13],
+[308235, 843, 12, 2],
+[308392, 844, 5, 7],
+[308419, 844, 6, 3],
+[308559, 844, 10, 21],
+[308583, 844, 11, 14],
+[308639, 845, 1, 9],
+[308792, 845, 6, 11],
+[308893, 845, 9, 20],
+[309020, 846, 1, 25],
+[309057, 846, 3, 3],
+[309130, 846, 5, 15],
+[309175, 846, 6, 29],
+[309373, 847, 1, 13],
+[309472, 847, 4, 22],
+[309541, 847, 6, 30],
+[309571, 847, 7, 30],
+[309748, 848, 1, 23],
+[309923, 848, 7, 16],
+[310015, 848, 10, 16],
+[310104, 849, 1, 13],
+[310209, 849, 4, 28],
+[310218, 849, 5, 7],
+[310314, 849, 8, 11],
+[310352, 849, 9, 18],
+[310438, 849, 12, 13],
+[310463, 850, 1, 7],
+[310468, 850, 1, 12],
+[310597, 850, 5, 21],
+[310754, 850, 10, 25],
+[310837, 851, 1, 16],
+[310994, 851, 6, 22],
+[311169, 851, 12, 14],
+[311357, 852, 6, 19],
+[311438, 852, 9, 8],
+[311635, 853, 3, 24],
+[311816, 853, 9, 21],
+[311823, 853, 9, 28],
+[311961, 854, 2, 13],
+[312065, 854, 5, 28],
+[312227, 854, 11, 6],
+[312406, 855, 5, 4],
+[312493, 855, 7, 30],
+[312554, 855, 9, 29],
+[312602, 855, 11, 16],
+[312759, 856, 4, 21],
+[312906, 856, 9, 15],
+[312912, 856, 9, 21],
+[312962, 856, 11, 10],
+[313086, 857, 3, 14],
+[313206, 857, 7, 12],
+[313298, 857, 10, 12],
+[313362, 857, 12, 15],
+[313497, 858, 4, 29],
+[313617, 858, 8, 27],
+[313796, 859, 2, 22],
+[313962, 859, 8, 7],
+[314041, 859, 10, 25],
+[314077, 859, 11, 30],
+[314178, 860, 3, 10],
+[314253, 860, 5, 24],
+[314377, 860, 9, 25],
+[314391, 860, 10, 9],
+[314455, 860, 12, 12],
+[314614, 861, 5, 20],
+[314785, 861, 11, 7],
+[314863, 862, 1, 24],
+[314996, 862, 6, 6],
+[315049, 862, 7, 29],
+[315109, 862, 9, 27],
+[315251, 863, 2, 16],
+[315437, 863, 8, 21],
+[315569, 863, 12, 31],
+[315624, 864, 2, 24],
+[315778, 864, 7, 27],
+[315887, 864, 11, 13],
+[316055, 865, 4, 30],
+[316174, 865, 8, 27],
+[316210, 865, 10, 2],
+[316322, 866, 1, 22],
+[316455, 866, 6, 4],
+[316485, 866, 7, 4],
+[316538, 866, 8, 26],
+[316621, 866, 11, 17],
+[316748, 867, 3, 24],
+[316939, 867, 10, 1],
+[317056, 868, 1, 26],
+[317239, 868, 7, 27],
+[317316, 868, 10, 12],
+[317367, 868, 12, 2],
+[317454, 869, 2, 27],
+[317504, 869, 4, 18],
+[317560, 869, 6, 13],
+[317577, 869, 6, 30],
+[317675, 869, 10, 6],
+[317733, 869, 12, 3],
+[317759, 869, 12, 29],
+[317950, 870, 7, 8],
+[317959, 870, 7, 17],
+[318098, 870, 12, 3],
+[318233, 871, 4, 17],
+[318329, 871, 7, 22],
+[318511, 872, 1, 20],
+[318561, 872, 3, 10],
+[318589, 872, 4, 7],
+[318767, 872, 10, 2],
+[318900, 873, 2, 12],
+[318973, 873, 4, 26],
+[319011, 873, 6, 3],
+[319113, 873, 9, 13],
+[319249, 874, 1, 27],
+[319367, 874, 5, 25],
+[319503, 874, 10, 8],
+[319528, 874, 11, 2],
+[319648, 875, 3, 2],
+[319670, 875, 3, 24],
+[319822, 875, 8, 23],
+[320012, 876, 2, 29],
+[320122, 876, 6, 18],
+[320300, 876, 12, 13],
+[320481, 877, 6, 12],
+[320532, 877, 8, 2],
+[320712, 878, 1, 29],
+[320876, 878, 7, 12],
+[320880, 878, 7, 16],
+[320901, 878, 8, 6],
+[321082, 879, 2, 3],
+[321236, 879, 7, 7],
+[321326, 879, 10, 5],
+[321377, 879, 11, 25],
+[321381, 879, 11, 29],
+[321390, 879, 12, 8],
+[321498, 880, 3, 25],
+[321650, 880, 8, 24],
+[321659, 880, 9, 2],
+[321809, 881, 1, 30],
+[321894, 881, 4, 25],
+[322072, 881, 10, 20],
+[322109, 881, 11, 26],
+[322248, 882, 4, 14],
+[322268, 882, 5, 4],
+[322321, 882, 6, 26],
+[322386, 882, 8, 30],
+[322397, 882, 9, 10],
+[322488, 882, 12, 10],
+[322623, 883, 4, 24],
+[322786, 883, 10, 4],
+[322844, 883, 12, 1],
+[323028, 884, 6, 2],
+[323088, 884, 8, 1],
+[323204, 884, 11, 25],
+[323402, 885, 6, 11],
+[323487, 885, 9, 4],
+[323665, 886, 3, 1],
+[323856, 886, 9, 8],
+[323900, 886, 10, 22],
+[323957, 886, 12, 18],
+[324135, 887, 6, 14],
+[324333, 887, 12, 29],
+[324511, 888, 6, 24],
+[324671, 888, 12, 1],
+[324763, 889, 3, 3],
+[324870, 889, 6, 18],
+[324996, 889, 10, 22],
+[325049, 889, 12, 14],
+[325054, 889, 12, 19],
+[325059, 889, 12, 24],
+[325079, 890, 1, 13],
+[325214, 890, 5, 28],
+[325364, 890, 10, 25],
+[325449, 891, 1, 18],
+[325512, 891, 3, 22],
+[325581, 891, 5, 30],
+[325717, 891, 10, 13],
+[325784, 891, 12, 19],
+[325832, 892, 2, 5],
+[325843, 892, 2, 16],
+[326021, 892, 8, 12],
+[326188, 893, 1, 26],
+[326273, 893, 4, 21],
+[326355, 893, 7, 12],
+[326363, 893, 7, 20],
+[326523, 893, 12, 27],
+[326545, 894, 1, 18],
+[326636, 894, 4, 19],
+[326766, 894, 8, 27],
+[326918, 895, 1, 26],
+[326924, 895, 2, 1],
+[327104, 895, 7, 31],
+[327195, 895, 10, 30],
+[327364, 896, 4, 16],
+[327547, 896, 10, 16],
+[327708, 897, 3, 26],
+[327894, 897, 9, 28],
+[328063, 898, 3, 16],
+[328129, 898, 5, 21],
+[328287, 898, 10, 26],
+[328465, 899, 4, 22],
+[328471, 899, 4, 28],
+[328539, 899, 7, 5],
+[328601, 899, 9, 5],
+[328730, 900, 1, 12],
+[328903, 900, 7, 4],
+[329011, 900, 10, 20],
+[329075, 900, 12, 23],
+[329097, 901, 1, 14],
+[329256, 901, 6, 22],
+[329315, 901, 8, 20],
+[329502, 902, 2, 23],
+[329618, 902, 6, 19],
+[329812, 902, 12, 30],
+[329815, 903, 1, 2],
+[329958, 903, 5, 25],
+[330143, 903, 11, 26],
+[330288, 904, 4, 19],
+[330367, 904, 7, 7],
+[330438, 904, 9, 16],
+[330566, 905, 1, 22],
+[330755, 905, 7, 30],
+[330759, 905, 8, 3],
+[330781, 905, 8, 25],
+[330934, 906, 1, 25],
+[331037, 906, 5, 8],
+[331040, 906, 5, 11],
+[331196, 906, 10, 14],
+[331219, 906, 11, 6],
+[331237, 906, 11, 24],
+[331384, 907, 4, 20],
+[331548, 907, 10, 1],
+[331677, 908, 2, 7],
+[331817, 908, 6, 26],
+[331843, 908, 7, 22],
+[331873, 908, 8, 21],
+[331886, 908, 9, 3],
+[331972, 908, 11, 28],
+[332145, 909, 5, 20],
+[332223, 909, 8, 6],
+[332371, 910, 1, 1],
+[332570, 910, 7, 19],
+[332766, 911, 1, 31],
+[332892, 911, 6, 6],
+[333046, 911, 11, 7],
+[333066, 911, 11, 27],
+[333100, 911, 12, 31],
+[333188, 912, 3, 28],
+[333343, 912, 8, 30],
+[333484, 913, 1, 18],
+[333630, 913, 6, 13],
+[333759, 913, 10, 20],
+[333871, 914, 2, 9],
+[333993, 914, 6, 11],
+[334088, 914, 9, 14],
+[334202, 915, 1, 6],
+[334211, 915, 1, 15],
+[334382, 915, 7, 5],
+[334450, 915, 9, 11],
+[334568, 916, 1, 7],
+[334670, 916, 4, 18],
+[334697, 916, 5, 15],
+[334790, 916, 8, 16],
+[334982, 917, 2, 24],
+[335177, 917, 9, 7],
+[335198, 917, 9, 28],
+[335254, 917, 11, 23],
+[335427, 918, 5, 15],
+[335530, 918, 8, 26],
+[335683, 919, 1, 26],
+[335813, 919, 6, 5],
+[335972, 919, 11, 11],
+[336045, 920, 1, 23],
+[336046, 920, 1, 24],
+[336160, 920, 5, 17],
+[336230, 920, 7, 26],
+[336292, 920, 9, 26],
+[336357, 920, 11, 30],
+[336382, 920, 12, 25],
+[336409, 921, 1, 21],
+[336451, 921, 3, 4],
+[336472, 921, 3, 25],
+[336505, 921, 4, 27],
+[336682, 921, 10, 21],
+[336707, 921, 11, 15],
+[336764, 922, 1, 11],
+[336901, 922, 5, 28],
+[336948, 922, 7, 14],
+[336960, 922, 7, 26],
+[336972, 922, 8, 7],
+[337029, 922, 10, 3],
+[337072, 922, 11, 15],
+[337200, 923, 3, 23],
+[337389, 923, 9, 28],
+[337534, 924, 2, 20],
+[337707, 924, 8, 11],
+[337719, 924, 8, 23],
+[337755, 924, 9, 28],
+[337796, 924, 11, 8],
+[337861, 925, 1, 12],
+[338051, 925, 7, 21],
+[338134, 925, 10, 12],
+[338218, 926, 1, 4],
+[338325, 926, 4, 21],
+[338345, 926, 5, 11],
+[338425, 926, 7, 30],
+[338575, 926, 12, 27],
+[338696, 927, 4, 27],
+[338758, 927, 6, 28],
+[338893, 927, 11, 10],
+[338981, 928, 2, 6],
+[339179, 928, 8, 22],
+[339281, 928, 12, 2],
+[339344, 929, 2, 3],
+[339476, 929, 6, 15],
+[339522, 929, 7, 31],
+[339633, 929, 11, 19],
+[339692, 930, 1, 17],
+[339846, 930, 6, 20],
+[339857, 930, 7, 1],
+[340027, 930, 12, 18],
+[340135, 931, 4, 5],
+[340167, 931, 5, 7],
+[340190, 931, 5, 30],
+[340385, 931, 12, 11],
+[340506, 932, 4, 10],
+[340553, 932, 5, 27],
+[340699, 932, 10, 20],
+[340770, 932, 12, 30],
+[340811, 933, 2, 9],
+[340976, 933, 7, 24],
+[341153, 934, 1, 17],
+[341232, 934, 4, 6],
+[341345, 934, 7, 28],
+[341456, 934, 11, 16],
+[341469, 934, 11, 29],
+[341549, 935, 2, 17],
+[341656, 935, 6, 4],
+[341703, 935, 7, 21],
+[341895, 936, 1, 29],
+[342028, 936, 6, 10],
+[342072, 936, 7, 24],
+[342167, 936, 10, 27],
+[342317, 937, 3, 26],
+[342412, 937, 6, 29],
+[342480, 937, 9, 5],
+[342663, 938, 3, 7],
+[342664, 938, 3, 8],
+[342854, 938, 9, 14],
+[343032, 939, 3, 11],
+[343067, 939, 4, 15],
+[343082, 939, 4, 30],
+[343135, 939, 6, 22],
+[343157, 939, 7, 14],
+[343305, 939, 12, 9],
+[343346, 940, 1, 19],
+[343512, 940, 7, 3],
+[343682, 940, 12, 20],
+[343775, 941, 3, 23],
+[343785, 941, 4, 2],
+[343960, 941, 9, 24],
+[344005, 941, 11, 8],
+[344156, 942, 4, 8],
+[344189, 942, 5, 11],
+[344348, 942, 10, 17],
+[344521, 943, 4, 8],
+[344538, 943, 4, 25],
+[344614, 943, 7, 10],
+[344791, 944, 1, 3],
+[344827, 944, 2, 8],
+[344957, 944, 6, 17],
+[345107, 944, 11, 14],
+[345158, 945, 1, 4],
+[345303, 945, 5, 29],
+[345469, 945, 11, 11],
+[345556, 946, 2, 6],
+[345558, 946, 2, 8],
+[345737, 946, 8, 6],
+[345756, 946, 8, 25],
+[345770, 946, 9, 8],
+[345798, 946, 10, 6],
+[345972, 947, 3, 29],
+[346053, 947, 6, 18],
+[346171, 947, 10, 14],
+[346336, 948, 3, 27],
+[346533, 948, 10, 10],
+[346570, 948, 11, 16],
+[346649, 949, 2, 3],
+[346798, 949, 7, 2],
+[346919, 949, 10, 31],
+[347015, 950, 2, 4],
+[347130, 950, 5, 30],
+[347214, 950, 8, 22],
+[347344, 950, 12, 30],
+[347439, 951, 4, 4],
+[347442, 951, 4, 7],
+[347633, 951, 10, 15],
+[347753, 952, 2, 12],
+[347843, 952, 5, 12],
+[347872, 952, 6, 10],
+[347884, 952, 6, 22],
+[348036, 952, 11, 21],
+[348059, 952, 12, 14],
+[348096, 953, 1, 20],
+[348143, 953, 3, 8],
+[348310, 953, 8, 22],
+[348374, 953, 10, 25],
+[348506, 954, 3, 6],
+[348639, 954, 7, 17],
+[348670, 954, 8, 17],
+[348839, 955, 2, 2],
+[348954, 955, 5, 28],
+[348959, 955, 6, 2],
+[349059, 955, 9, 10],
+[349141, 955, 12, 1],
+[349293, 956, 5, 1],
+[349361, 956, 7, 8],
+[349412, 956, 8, 28],
+[349593, 957, 2, 25],
+[349631, 957, 4, 4],
+[349810, 957, 9, 30],
+[349841, 957, 10, 31],
+[349994, 958, 4, 2],
+[350133, 958, 8, 19],
+[350271, 959, 1, 4],
+[350353, 959, 3, 27],
+[350358, 959, 4, 1],
+[350420, 959, 6, 2],
+[350431, 959, 6, 13],
+[350607, 959, 12, 6],
+[350752, 960, 4, 29],
+[350894, 960, 9, 18],
+[350934, 960, 10, 28],
+[350937, 960, 10, 31],
+[351125, 961, 5, 7],
+[351211, 961, 8, 1],
+[351257, 961, 9, 16],
+[351405, 962, 2, 11],
+[351541, 962, 6, 27],
+[351629, 962, 9, 23],
+[351742, 963, 1, 14],
+[351791, 963, 3, 4],
+[351916, 963, 7, 7],
+[351921, 963, 7, 12],
+[352086, 963, 12, 24],
+[352117, 964, 1, 24],
+[352193, 964, 4, 9],
+[352343, 964, 9, 6],
+[352442, 964, 12, 14],
+[352632, 965, 6, 22],
+[352816, 965, 12, 23],
+[352893, 966, 3, 10],
+[352979, 966, 6, 4],
+[353176, 966, 12, 18],
+[353259, 967, 3, 11],
+[353410, 967, 8, 9],
+[353427, 967, 8, 26],
+[353430, 967, 8, 29],
+[353432, 967, 8, 31],
+[353472, 967, 10, 10],
+[353646, 968, 4, 1],
+[353807, 968, 9, 9],
+[353841, 968, 10, 13],
+[353944, 969, 1, 24],
+[354131, 969, 7, 30],
+[354220, 969, 10, 27],
+[354399, 970, 4, 24],
+[354598, 970, 11, 9],
+[354760, 971, 4, 20],
+[354874, 971, 8, 12],
+[354901, 971, 9, 8],
+[355070, 972, 2, 24],
+[355228, 972, 7, 31],
+[355361, 972, 12, 11],
+[355371, 972, 12, 21],
+[355481, 973, 4, 10],
+[355614, 973, 8, 21],
+[355694, 973, 11, 9],
+[355789, 974, 2, 12],
+[355867, 974, 5, 1],
+[355957, 974, 7, 30],
+[356009, 974, 9, 20],
+[356096, 974, 12, 16],
+[356247, 975, 5, 16],
+[356259, 975, 5, 28],
+[356370, 975, 9, 16],
+[356461, 975, 12, 16],
+[356586, 976, 4, 19],
+[356660, 976, 7, 2],
+[356779, 976, 10, 29],
+[356957, 977, 4, 25],
+[357029, 977, 7, 6],
+[357151, 977, 11, 5],
+[357203, 977, 12, 27],
+[357230, 978, 1, 23],
+[357328, 978, 5, 1],
+[357367, 978, 6, 9],
+[357499, 978, 10, 19],
+[357567, 978, 12, 26],
+[357748, 979, 6, 25],
+[357946, 980, 1, 9],
+[358095, 980, 6, 6],
+[358214, 980, 10, 3],
+[358260, 980, 11, 18],
+[358442, 981, 5, 19],
+[358565, 981, 9, 19],
+[358710, 982, 2, 11],
+[358877, 982, 7, 28],
+[358982, 982, 11, 10],
+[359136, 983, 4, 13],
+[359298, 983, 9, 22],
+[359343, 983, 11, 6],
+[359504, 984, 4, 15],
+[359506, 984, 4, 17],
+[359603, 984, 7, 23],
+[359735, 984, 12, 2],
+[359848, 985, 3, 25],
+[359919, 985, 6, 4],
+[360090, 985, 11, 22],
+[360176, 986, 2, 16],
+[360208, 986, 3, 20],
+[360338, 986, 7, 28],
+[360510, 987, 1, 16],
+[360684, 987, 7, 9],
+[360732, 987, 8, 26],
+[360765, 987, 9, 28],
+[360876, 988, 1, 17],
+[361047, 988, 7, 6],
+[361084, 988, 8, 12],
+[361136, 988, 10, 3],
+[361317, 989, 4, 2],
+[361488, 989, 9, 20],
+[361661, 990, 3, 12],
+[361828, 990, 8, 26],
+[362005, 991, 2, 19],
+[362182, 991, 8, 15],
+[362331, 992, 1, 11],
+[362370, 992, 2, 19],
+[362416, 992, 4, 5],
+[362497, 992, 6, 25],
+[362534, 992, 8, 1],
+[362615, 992, 10, 21],
+[362795, 993, 4, 19],
+[362941, 993, 9, 12],
+[363086, 994, 2, 4],
+[363190, 994, 5, 19],
+[363227, 994, 6, 25],
+[363390, 994, 12, 5],
+[363524, 995, 4, 18],
+[363686, 995, 9, 27],
+[363882, 996, 4, 10],
+[364026, 996, 9, 1],
+[364218, 997, 3, 12],
+[364257, 997, 4, 20],
+[364328, 997, 6, 30],
+[364391, 997, 9, 1],
+[364416, 997, 9, 26],
+[364569, 998, 2, 26],
+[364576, 998, 3, 5],
+[364670, 998, 6, 7],
+[364708, 998, 7, 15],
+[364898, 999, 1, 21],
+[365086, 999, 7, 28],
+[365282, 1000, 2, 9],
+[365425, 1000, 7, 2],
+[365572, 1000, 11, 26],
+[365751, 1001, 5, 24],
+[365873, 1001, 9, 23],
+[365876, 1001, 9, 26],
+[366023, 1002, 2, 20],
+[366047, 1002, 3, 16],
+[366071, 1002, 4, 9],
+[366215, 1002, 8, 31],
+[366342, 1003, 1, 5],
+[366463, 1003, 5, 6],
+[366663, 1003, 11, 22],
+[366740, 1004, 2, 7],
+[366801, 1004, 4, 8],
+[366823, 1004, 4, 30],
+[366961, 1004, 9, 15],
+[367055, 1004, 12, 18],
+[367166, 1005, 4, 8],
+[367304, 1005, 8, 24],
+[367395, 1005, 11, 23],
+[367402, 1005, 11, 30],
+[367511, 1006, 3, 19],
+[367559, 1006, 5, 6],
+[367671, 1006, 8, 26],
+[367708, 1006, 10, 2],
+[367803, 1007, 1, 5],
+[367910, 1007, 4, 22],
+[367962, 1007, 6, 13],
+[368095, 1007, 10, 24],
+[368108, 1007, 11, 6],
+[368139, 1007, 12, 7],
+[368178, 1008, 1, 15],
+[368324, 1008, 6, 9],
+[368362, 1008, 7, 17],
+[368495, 1008, 11, 27],
+[368642, 1009, 4, 23],
+[368715, 1009, 7, 5],
+[368734, 1009, 7, 24],
+[368757, 1009, 8, 16],
+[368850, 1009, 11, 17],
+[368986, 1010, 4, 2],
+[369078, 1010, 7, 3],
+[369263, 1011, 1, 4],
+[369288, 1011, 1, 29],
+[369435, 1011, 6, 25],
+[369560, 1011, 10, 28],
+[369748, 1012, 5, 3],
+[369767, 1012, 5, 22],
+[369915, 1012, 10, 17],
+[370074, 1013, 3, 25],
+[370209, 1013, 8, 7],
+[370279, 1013, 10, 16],
+[370284, 1013, 10, 21],
+[370331, 1013, 12, 7],
+[370452, 1014, 4, 7],
+[370561, 1014, 7, 25],
+[370751, 1015, 1, 31],
+[370766, 1015, 2, 15],
+[370788, 1015, 3, 9],
+[370972, 1015, 9, 9],
+[371037, 1015, 11, 13],
+[371158, 1016, 3, 13],
+[371162, 1016, 3, 17],
+[371238, 1016, 6, 1],
+[371253, 1016, 6, 16],
+[371310, 1016, 8, 12],
+[371368, 1016, 10, 9],
+[371517, 1017, 3, 7],
+[371535, 1017, 3, 25],
+[371605, 1017, 6, 3],
+[371640, 1017, 7, 8],
+[371676, 1017, 8, 13],
+[371686, 1017, 8, 23],
+[371801, 1017, 12, 16],
+[371903, 1018, 3, 28],
+[372077, 1018, 9, 18],
+[372236, 1019, 2, 24],
+[372322, 1019, 5, 21],
+[372333, 1019, 6, 1],
+[372450, 1019, 9, 26],
+[372480, 1019, 10, 26],
+[372680, 1020, 5, 13],
+[372757, 1020, 7, 29],
+[372881, 1020, 11, 30],
+[373058, 1021, 5, 26],
+[373163, 1021, 9, 8],
+[373256, 1021, 12, 10],
+[373405, 1022, 5, 8],
+[373457, 1022, 6, 29],
+[373498, 1022, 8, 9],
+[373519, 1022, 8, 30],
+[373708, 1023, 3, 7],
+[373724, 1023, 3, 23],
+[373895, 1023, 9, 10],
+[373941, 1023, 10, 26],
+[374102, 1024, 4, 4],
+[374301, 1024, 10, 20],
+[374342, 1024, 11, 30],
+[374479, 1025, 4, 16],
+[374661, 1025, 10, 15],
+[374696, 1025, 11, 19],
+[374711, 1025, 12, 4],
+[374806, 1026, 3, 9],
+[374931, 1026, 7, 12],
+[375121, 1027, 1, 18],
+[375213, 1027, 4, 20],
+[375360, 1027, 9, 14],
+[375373, 1027, 9, 27],
+[375567, 1028, 4, 8],
+[375642, 1028, 6, 22],
+[375705, 1028, 8, 24],
+[375898, 1029, 3, 5],
+[376013, 1029, 6, 28],
+[376144, 1029, 11, 6],
+[376164, 1029, 11, 26],
+[376239, 1030, 2, 9],
+[376312, 1030, 4, 23],
+[376430, 1030, 8, 19],
+[376593, 1031, 1, 29],
+[376769, 1031, 7, 24],
+[376837, 1031, 9, 30],
+[376902, 1031, 12, 4],
+[376970, 1032, 2, 10],
+[377122, 1032, 7, 11],
+[377261, 1032, 11, 27],
+[377392, 1033, 4, 7],
+[377528, 1033, 8, 21],
+[377690, 1034, 1, 30],
+[377732, 1034, 3, 13],
+[377793, 1034, 5, 13],
+[377902, 1034, 8, 30],
+[377998, 1034, 12, 4],
+[378005, 1034, 12, 11],
+[378125, 1035, 4, 10],
+[378283, 1035, 9, 15],
+[378339, 1035, 11, 10],
+[378487, 1036, 4, 6],
+[378547, 1036, 6, 5],
+[378730, 1036, 12, 5],
+[378892, 1037, 5, 16],
+[379084, 1037, 11, 24],
+[379182, 1038, 3, 2],
+[379210, 1038, 3, 30],
+[379340, 1038, 8, 7],
+[379387, 1038, 9, 23],
+[379572, 1039, 3, 27],
+[379606, 1039, 4, 30],
+[379789, 1039, 10, 30],
+[379858, 1040, 1, 7],
+[380020, 1040, 6, 17],
+[380096, 1040, 9, 1],
+[380199, 1040, 12, 13],
+[380337, 1041, 4, 30],
+[380374, 1041, 6, 6],
+[380395, 1041, 6, 27],
+[380572, 1041, 12, 21],
+[380657, 1042, 3, 16],
+[380826, 1042, 9, 1],
+[380952, 1043, 1, 5],
+[380961, 1043, 1, 14],
+[381110, 1043, 6, 12],
+[381170, 1043, 8, 11],
+[381319, 1044, 1, 7],
+[381344, 1044, 2, 1],
+[381356, 1044, 2, 13],
+[381502, 1044, 7, 8],
+[381685, 1045, 1, 7],
+[381690, 1045, 1, 12],
+[381862, 1045, 7, 3],
+[381951, 1045, 9, 30],
+[382116, 1046, 3, 14],
+[382170, 1046, 5, 7],
+[382253, 1046, 7, 29],
+[382393, 1046, 12, 16],
+[382572, 1047, 6, 13],
+[382602, 1047, 7, 13],
+[382605, 1047, 7, 16],
+[382776, 1048, 1, 3],
+[382797, 1048, 1, 24],
+[382976, 1048, 7, 21],
+[383153, 1049, 1, 14],
+[383213, 1049, 3, 15],
+[383383, 1049, 9, 1],
+[383571, 1050, 3, 8],
+[383717, 1050, 8, 1],
+[383741, 1050, 8, 25],
+[383792, 1050, 10, 15],
+[383892, 1051, 1, 23],
+[383987, 1051, 4, 28],
+[384088, 1051, 8, 7],
+[384230, 1051, 12, 27],
+[384404, 1052, 6, 18],
+[384470, 1052, 8, 23],
+[384491, 1052, 9, 13],
+[384537, 1052, 10, 29],
+[384688, 1053, 3, 29],
+[384729, 1053, 5, 9],
+[384800, 1053, 7, 19],
+[384803, 1053, 7, 22],
+[384959, 1053, 12, 25],
+[385021, 1054, 2, 25],
+[385187, 1054, 8, 10],
+[385295, 1054, 11, 26],
+[385451, 1055, 5, 1],
+[385499, 1055, 6, 18],
+[385667, 1055, 12, 3],
+[385758, 1056, 3, 3],
+[385917, 1056, 8, 9],
+[386005, 1056, 11, 5],
+[386129, 1057, 3, 9],
+[386130, 1057, 3, 10],
+[386205, 1057, 5, 24],
+[386208, 1057, 5, 27],
+[386361, 1057, 10, 27],
+[386418, 1057, 12, 23],
+[386464, 1058, 2, 7],
+[386468, 1058, 2, 11],
+[386636, 1058, 7, 29],
+[386811, 1059, 1, 20],
+[386909, 1059, 4, 28],
+[386973, 1059, 7, 1],
+[387133, 1059, 12, 8],
+[387145, 1059, 12, 20],
+[387171, 1060, 1, 15],
+[387269, 1060, 4, 22],
+[387467, 1060, 11, 6],
+[387572, 1061, 2, 19],
+[387702, 1061, 6, 29],
+[387747, 1061, 8, 13],
+[387800, 1061, 10, 5],
+[387972, 1062, 3, 26],
+[388073, 1062, 7, 5],
+[388150, 1062, 9, 20],
+[388155, 1062, 9, 25],
+[388319, 1063, 3, 8],
+[388472, 1063, 8, 8],
+[388611, 1063, 12, 25],
+[388631, 1064, 1, 14],
+[388796, 1064, 6, 27],
+[388962, 1064, 12, 10],
+[389101, 1065, 4, 28],
+[389292, 1065, 11, 5],
+[389417, 1066, 3, 10],
+[389571, 1066, 8, 11],
+[389754, 1067, 2, 10],
+[389922, 1067, 7, 28],
+[390023, 1067, 11, 6],
+[390197, 1068, 4, 28],
+[390203, 1068, 5, 4],
+[390348, 1068, 9, 26],
+[390493, 1069, 2, 18],
+[390647, 1069, 7, 22],
+[390703, 1069, 9, 16],
+[390706, 1069, 9, 19],
+[390748, 1069, 10, 31],
+[390853, 1070, 2, 13],
+[390961, 1070, 6, 1],
+[391050, 1070, 8, 29],
+[391106, 1070, 10, 24],
+[391239, 1071, 3, 6],
+[391279, 1071, 4, 15],
+[391302, 1071, 5, 8],
+[391364, 1071, 7, 9],
+[391562, 1072, 1, 23],
+[391565, 1072, 1, 26],
+[391745, 1072, 7, 24],
+[391871, 1072, 11, 27],
+[391983, 1073, 3, 19],
+[392131, 1073, 8, 14],
+[392323, 1074, 2, 22],
+[392420, 1074, 5, 30],
+[392430, 1074, 6, 9],
+[392596, 1074, 11, 22],
+[392637, 1075, 1, 2],
+[392748, 1075, 4, 23],
+[392856, 1075, 8, 9],
+[392918, 1075, 10, 10],
+[392947, 1075, 11, 8],
+[393123, 1076, 5, 2],
+[393312, 1076, 11, 7],
+[393373, 1077, 1, 7],
+[393442, 1077, 3, 17],
+[393599, 1077, 8, 21],
+[393619, 1077, 9, 10],
+[393770, 1078, 2, 8],
+[393794, 1078, 3, 4],
+[393932, 1078, 7, 20],
+[394107, 1079, 1, 11],
+[394265, 1079, 6, 18],
+[394345, 1079, 9, 6],
+[394496, 1080, 2, 4],
+[394589, 1080, 5, 7],
+[394620, 1080, 6, 7],
+[394773, 1080, 11, 7],
+[394811, 1080, 12, 15],
+[394923, 1081, 4, 6],
+[395109, 1081, 10, 9],
+[395192, 1081, 12, 31],
+[395200, 1082, 1, 8],
+[395315, 1082, 5, 3],
+[395337, 1082, 5, 25],
+[395490, 1082, 10, 25],
+[395573, 1083, 1, 16],
+[395657, 1083, 4, 10],
+[395722, 1083, 6, 14],
+[395760, 1083, 7, 22],
+[395790, 1083, 8, 21],
+[395869, 1083, 11, 8],
+[395989, 1084, 3, 7],
+[396070, 1084, 5, 27],
+[396262, 1084, 12, 5],
+[396340, 1085, 2, 21],
+[396385, 1085, 4, 7],
+[396450, 1085, 6, 11],
+[396500, 1085, 7, 31],
+[396557, 1085, 9, 26],
+[396735, 1086, 3, 23],
+[396747, 1086, 4, 4],
+[396894, 1086, 8, 29],
+[396943, 1086, 10, 17],
+[396978, 1086, 11, 21],
+[397103, 1087, 3, 26],
+[397241, 1087, 8, 11],
+[397370, 1087, 12, 18],
+[397460, 1088, 3, 17],
+[397650, 1088, 9, 23],
+[397825, 1089, 3, 17],
+[397970, 1089, 8, 9],
+[398088, 1089, 12, 5],
+[398214, 1090, 4, 10],
+[398228, 1090, 4, 24],
+[398262, 1090, 5, 28],
+[398457, 1090, 12, 9],
+[398579, 1091, 4, 10],
+[398695, 1091, 8, 4],
+[398779, 1091, 10, 27],
+[398844, 1091, 12, 31],
+[398955, 1092, 4, 20],
+[398984, 1092, 5, 19],
+[399054, 1092, 7, 28],
+[399239, 1093, 1, 29],
+[399250, 1093, 2, 9],
+[399385, 1093, 6, 24],
+[399409, 1093, 7, 18],
+[399480, 1093, 9, 27],
+[399574, 1093, 12, 30],
+[399596, 1094, 1, 21],
+[399717, 1094, 5, 22],
+[399723, 1094, 5, 28],
+[399783, 1094, 7, 27],
+[399789, 1094, 8, 2],
+[399861, 1094, 10, 13],
+[399912, 1094, 12, 3],
+[400030, 1095, 3, 31],
+[400228, 1095, 10, 15],
+[400406, 1096, 4, 10],
+[400544, 1096, 8, 26],
+[400653, 1096, 12, 13],
+[400756, 1097, 3, 26],
+[400911, 1097, 8, 28],
+[400919, 1097, 9, 5],
+[400924, 1097, 9, 10],
+[400975, 1097, 10, 31],
+[401166, 1098, 5, 10],
+[401263, 1098, 8, 15],
+[401463, 1099, 3, 3],
+[401609, 1099, 7, 27],
+[401802, 1100, 2, 5],
+[401835, 1100, 3, 10],
+[401880, 1100, 4, 24],
+[402018, 1100, 9, 9],
+[402177, 1101, 2, 15],
+[402200, 1101, 3, 10],
+[402399, 1101, 9, 25],
+[402542, 1102, 2, 15],
+[402668, 1102, 6, 21],
+[402706, 1102, 7, 29],
+[402807, 1102, 11, 7],
+[402864, 1103, 1, 3],
+[403043, 1103, 7, 1],
+[403162, 1103, 10, 28],
+[403203, 1103, 12, 8],
+[403221, 1103, 12, 26],
+[403416, 1104, 7, 8],
+[403605, 1105, 1, 13],
+[403766, 1105, 6, 23],
+[403847, 1105, 9, 12],
+[403913, 1105, 11, 17],
+[404056, 1106, 4, 9],
+[404064, 1106, 4, 17],
+[404135, 1106, 6, 27],
+[404239, 1106, 10, 9],
+[404392, 1107, 3, 11],
+[404447, 1107, 5, 5],
+[404527, 1107, 7, 24],
+[404600, 1107, 10, 5],
+[404774, 1108, 3, 27],
+[404813, 1108, 5, 5],
+[404985, 1108, 10, 24],
+[405086, 1109, 2, 2],
+[405217, 1109, 6, 13],
+[405351, 1109, 10, 25],
+[405373, 1109, 11, 16],
+[405456, 1110, 2, 7],
+[405597, 1110, 6, 28],
+[405754, 1110, 12, 2],
+[405859, 1111, 3, 17],
+[405928, 1111, 5, 25],
+[406109, 1111, 11, 22],
+[406274, 1112, 5, 5],
+[406362, 1112, 8, 1],
+[406474, 1112, 11, 21],
+[406624, 1113, 4, 20],
+[406697, 1113, 7, 2],
+[406745, 1113, 8, 19],
+[406824, 1113, 11, 6],
+[406851, 1113, 12, 3],
+[406863, 1113, 12, 15],
+[406886, 1114, 1, 7],
+[406922, 1114, 2, 12],
+[407084, 1114, 7, 24],
+[407124, 1114, 9, 2],
+[407251, 1115, 1, 7],
+[407307, 1115, 3, 4],
+[407383, 1115, 5, 19],
+[407401, 1115, 6, 6],
+[407508, 1115, 9, 21],
+[407694, 1116, 3, 25],
+[407867, 1116, 9, 14],
+[407941, 1116, 11, 27],
+[407989, 1117, 1, 14],
+[408064, 1117, 3, 30],
+[408145, 1117, 6, 19],
+[408189, 1117, 8, 2],
+[408316, 1117, 12, 7],
+[408501, 1118, 6, 10],
+[408655, 1118, 11, 11],
+[408713, 1119, 1, 8],
+[408813, 1119, 4, 18],
+[408938, 1119, 8, 21],
+[409015, 1119, 11, 6],
+[409076, 1120, 1, 6],
+[409193, 1120, 5, 2],
+[409369, 1120, 10, 25],
+[409527, 1121, 4, 1],
+[409584, 1121, 5, 28],
+[409713, 1121, 10, 4],
+[409890, 1122, 3, 30],
+[410038, 1122, 8, 25],
+[410056, 1122, 9, 12],
+[410110, 1122, 11, 5],
+[410263, 1123, 4, 7],
+[410272, 1123, 4, 16],
+[410370, 1123, 7, 23],
+[410544, 1124, 1, 13],
+[410718, 1124, 7, 5],
+[410806, 1124, 10, 1],
+[410833, 1124, 10, 28],
+[410960, 1125, 3, 4],
+[411031, 1125, 5, 14],
+[411134, 1125, 8, 25],
+[411209, 1125, 11, 8],
+[411300, 1126, 2, 7],
+[411373, 1126, 4, 21],
+[411378, 1126, 4, 26],
+[411397, 1126, 5, 15],
+[411556, 1126, 10, 21],
+[411739, 1127, 4, 22],
+[411923, 1127, 10, 23],
+[411968, 1127, 12, 7],
+[412052, 1128, 2, 29],
+[412066, 1128, 3, 14],
+[412149, 1128, 6, 5],
+[412308, 1128, 11, 11],
+[412367, 1129, 1, 9],
+[412515, 1129, 6, 6],
+[412674, 1129, 11, 12],
+[412861, 1130, 5, 18],
+[412867, 1130, 5, 24],
+[412956, 1130, 8, 21],
+[413015, 1130, 10, 19],
+[413190, 1131, 4, 12],
+[413353, 1131, 9, 22],
+[413466, 1132, 1, 13],
+[413542, 1132, 3, 29],
+[413670, 1132, 8, 4],
+[413828, 1133, 1, 9],
+[414001, 1133, 7, 1],
+[414030, 1133, 7, 30],
+[414109, 1133, 10, 17],
+[414227, 1134, 2, 12],
+[414301, 1134, 4, 27],
+[414341, 1134, 6, 6],
+[414540, 1134, 12, 22],
+[414577, 1135, 1, 28],
+[414626, 1135, 3, 18],
+[414648, 1135, 4, 9],
+[414829, 1135, 10, 7],
+[414929, 1136, 1, 15],
+[415050, 1136, 5, 15],
+[415134, 1136, 8, 7],
+[415333, 1137, 2, 22],
+[415377, 1137, 4, 7],
+[415474, 1137, 7, 13],
+[415536, 1137, 9, 13],
+[415646, 1138, 1, 1],
+[415753, 1138, 4, 18],
+[415904, 1138, 9, 16],
+[415946, 1138, 10, 28],
+[416016, 1139, 1, 6],
+[416034, 1139, 1, 24],
+[416160, 1139, 5, 30],
+[416191, 1139, 6, 30],
+[416308, 1139, 10, 25],
+[416419, 1140, 2, 13],
+[416453, 1140, 3, 18],
+[416540, 1140, 6, 13],
+[416732, 1140, 12, 22],
+[416810, 1141, 3, 10],
+[416995, 1141, 9, 11],
+[417001, 1141, 9, 17],
+[417102, 1141, 12, 27],
+[417277, 1142, 6, 20],
+[417312, 1142, 7, 25],
+[417317, 1142, 7, 30],
+[417412, 1142, 11, 2],
+[417518, 1143, 2, 16],
+[417523, 1143, 2, 21],
+[417690, 1143, 8, 7],
+[417841, 1144, 1, 5],
+[417854, 1144, 1, 18],
+[417984, 1144, 5, 27],
+[418036, 1144, 7, 18],
+[418054, 1144, 8, 5],
+[418148, 1144, 11, 7],
+[418331, 1145, 5, 9],
+[418381, 1145, 6, 28],
+[418563, 1145, 12, 27],
+[418641, 1146, 3, 15],
+[418806, 1146, 8, 27],
+[418849, 1146, 10, 9],
+[418864, 1146, 10, 24],
+[419037, 1147, 4, 15],
+[419132, 1147, 7, 19],
+[419290, 1147, 12, 24],
+[419374, 1148, 3, 17],
+[419413, 1148, 4, 25],
+[419457, 1148, 6, 8],
+[419487, 1148, 7, 8],
+[419645, 1148, 12, 13],
+[419792, 1149, 5, 9],
+[419873, 1149, 7, 29],
+[419968, 1149, 11, 1],
+[419991, 1149, 11, 24],
+[420104, 1150, 3, 17],
+[420173, 1150, 5, 25],
+[420347, 1150, 11, 15],
+[420394, 1151, 1, 1],
+[420549, 1151, 6, 5],
+[420600, 1151, 7, 26],
+[420646, 1151, 9, 10],
+[420726, 1151, 11, 29],
+[420891, 1152, 5, 12],
+[421025, 1152, 9, 23],
+[421199, 1153, 3, 16],
+[421201, 1153, 3, 18],
+[421279, 1153, 6, 4],
+[421316, 1153, 7, 11],
+[421460, 1153, 12, 2],
+[421508, 1154, 1, 19],
+[421562, 1154, 3, 14],
+[421607, 1154, 4, 28],
+[421774, 1154, 10, 12],
+[421776, 1154, 10, 14],
+[421899, 1155, 2, 14],
+[421955, 1155, 4, 11],
+[422006, 1155, 6, 1],
+[422200, 1155, 12, 12],
+[422215, 1155, 12, 27],
+[422345, 1156, 5, 5],
+[422463, 1156, 8, 31],
+[422628, 1157, 2, 12],
+[422741, 1157, 6, 5],
+[422921, 1157, 12, 2],
+[423079, 1158, 5, 9],
+[423256, 1158, 11, 2],
+[423418, 1159, 4, 13],
+[423565, 1159, 9, 7],
+[423677, 1159, 12, 28],
+[423691, 1160, 1, 11],
+[423818, 1160, 5, 17],
+[423850, 1160, 6, 18],
+[424013, 1160, 11, 28],
+[424044, 1160, 12, 29],
+[424064, 1161, 1, 18],
+[424194, 1161, 5, 28],
+[424314, 1161, 9, 25],
+[424427, 1162, 1, 16],
+[424431, 1162, 1, 20],
+[424631, 1162, 8, 8],
+[424681, 1162, 9, 27],
+[424757, 1162, 12, 12],
+[424929, 1163, 6, 2],
+[424980, 1163, 7, 23],
+[425155, 1164, 1, 14],
+[425337, 1164, 7, 14],
+[425454, 1164, 11, 8],
+[425464, 1164, 11, 18],
+[425494, 1164, 12, 18],
+[425690, 1165, 7, 2],
+[425875, 1166, 1, 3],
+[426058, 1166, 7, 5],
+[426226, 1166, 12, 20],
+[426386, 1167, 5, 29],
+[426572, 1167, 12, 1],
+[426603, 1168, 1, 1],
+[426706, 1168, 4, 13],
+[426849, 1168, 9, 3],
+[426942, 1168, 12, 5],
+[426992, 1169, 1, 24],
+[427125, 1169, 6, 6],
+[427152, 1169, 7, 3],
+[427350, 1170, 1, 17],
+[427402, 1170, 3, 10],
+[427537, 1170, 7, 23],
+[427678, 1170, 12, 11],
+[427686, 1170, 12, 19],
+[427842, 1171, 5, 24],
+[427929, 1171, 8, 19],
+[428115, 1172, 2, 21],
+[428287, 1172, 8, 11],
+[428378, 1172, 11, 10],
+[428427, 1172, 12, 29],
+[428542, 1173, 4, 23],
+[428551, 1173, 5, 2],
+[428687, 1173, 9, 15],
+[428843, 1174, 2, 18],
+[428922, 1174, 5, 8],
+[428960, 1174, 6, 15],
+[429052, 1174, 9, 15],
+[429082, 1174, 10, 15],
+[429247, 1175, 3, 29],
+[429391, 1175, 8, 20],
+[429554, 1176, 1, 30],
+[429732, 1176, 7, 26],
+[429930, 1177, 2, 9],
+[429974, 1177, 3, 25],
+[430004, 1177, 4, 24],
+[430022, 1177, 5, 12],
+[430044, 1177, 6, 3],
+[430122, 1177, 8, 20],
+[430277, 1178, 1, 22],
+[430333, 1178, 3, 19],
+[430392, 1178, 5, 17],
+[430403, 1178, 5, 28],
+[430521, 1178, 9, 23],
+[430627, 1179, 1, 7],
+[430703, 1179, 3, 24],
+[430734, 1179, 4, 24],
+[430771, 1179, 5, 31],
+[430884, 1179, 9, 21],
+[430947, 1179, 11, 23],
+[431015, 1180, 1, 30],
+[431075, 1180, 3, 30],
+[431194, 1180, 7, 27],
+[431369, 1181, 1, 18],
+[431407, 1181, 2, 25],
+[431503, 1181, 6, 1],
+[431649, 1181, 10, 25],
+[431778, 1182, 3, 3],
+[431845, 1182, 5, 9],
+[431983, 1182, 9, 24],
+[432093, 1183, 1, 12],
+[432234, 1183, 6, 2],
+[432269, 1183, 7, 7],
+[432461, 1184, 1, 15],
+[432614, 1184, 6, 16],
+[432741, 1184, 10, 21],
+[432934, 1185, 5, 2],
+[433049, 1185, 8, 25],
+[433233, 1186, 2, 25],
+[433267, 1186, 3, 31],
+[433358, 1186, 6, 30],
+[433557, 1187, 1, 15],
+[433754, 1187, 7, 31],
+[433914, 1188, 1, 7],
+[434104, 1188, 7, 15],
+[434134, 1188, 8, 14],
+[434164, 1188, 9, 13],
+[434267, 1188, 12, 25],
+[434402, 1189, 5, 9],
+[434601, 1189, 11, 24],
+[434694, 1190, 2, 25],
+[434700, 1190, 3, 3],
+[434841, 1190, 7, 22],
+[435020, 1191, 1, 17],
+[435117, 1191, 4, 24],
+[435166, 1191, 6, 12],
+[435325, 1191, 11, 18],
+[435400, 1192, 2, 1],
+[435509, 1192, 5, 20],
+[435679, 1192, 11, 6],
+[435865, 1193, 5, 11],
+[436063, 1193, 11, 25],
+[436131, 1194, 2, 1],
+[436300, 1194, 7, 20],
+[436326, 1194, 8, 15],
+[436487, 1195, 1, 23],
+[436687, 1195, 8, 11],
+[436736, 1195, 9, 29],
+[436919, 1196, 3, 30],
+[437009, 1196, 6, 28],
+[437178, 1196, 12, 14],
+[437353, 1197, 6, 7],
+[437379, 1197, 7, 3],
+[437489, 1197, 10, 21],
+[437492, 1197, 10, 24],
+[437512, 1197, 11, 13],
+[437562, 1198, 1, 2],
+[437758, 1198, 7, 17],
+[437866, 1198, 11, 2],
+[437985, 1199, 3, 1],
+[438078, 1199, 6, 2],
+[438157, 1199, 8, 20],
+[438180, 1199, 9, 12],
+[438351, 1200, 3, 1],
+[438424, 1200, 5, 13],
+[438618, 1200, 11, 23],
+[438657, 1201, 1, 1],
+[438812, 1201, 6, 5],
+[438991, 1201, 12, 1],
+[439057, 1202, 2, 5],
+[439059, 1202, 2, 7],
+[439098, 1202, 3, 18],
+[439286, 1202, 9, 22],
+[439370, 1202, 12, 15],
+[439504, 1203, 4, 28],
+[439569, 1203, 7, 2],
+[439684, 1203, 10, 25],
+[439801, 1204, 2, 19],
+[439856, 1204, 4, 14],
+[439950, 1204, 7, 17],
+[440002, 1204, 9, 7],
+[440034, 1204, 10, 9],
+[440060, 1204, 11, 4],
+[440078, 1204, 11, 22],
+[440150, 1205, 2, 2],
+[440153, 1205, 2, 5],
+[440350, 1205, 8, 21],
+[440488, 1206, 1, 6],
+[440605, 1206, 5, 3],
+[440658, 1206, 6, 25],
+[440683, 1206, 7, 20],
+[440776, 1206, 10, 21],
+[440934, 1207, 3, 28],
+[441118, 1207, 9, 28],
+[441145, 1207, 10, 25],
+[441297, 1208, 3, 25],
+[441479, 1208, 9, 23],
+[441527, 1208, 11, 10],
+[441689, 1209, 4, 21],
+[441738, 1209, 6, 9],
+[441746, 1209, 6, 17],
+[441900, 1209, 11, 18],
+[442094, 1210, 5, 31],
+[442259, 1210, 11, 12],
+[442418, 1211, 4, 20],
+[442555, 1211, 9, 4],
+[442616, 1211, 11, 4],
+[442787, 1212, 4, 23],
+[442792, 1212, 4, 28],
+[442903, 1212, 8, 17],
+[442921, 1212, 9, 4],
+[442988, 1212, 11, 10],
+[443143, 1213, 4, 14],
+[443158, 1213, 4, 29],
+[443343, 1213, 10, 31],
+[443521, 1214, 4, 27],
+[443637, 1214, 8, 21],
+[443827, 1215, 2, 27],
+[443918, 1215, 5, 29],
+[443938, 1215, 6, 18],
+[443989, 1215, 8, 8],
+[444161, 1216, 1, 27],
+[444209, 1216, 3, 15],
+[444213, 1216, 3, 19],
+[444308, 1216, 6, 22],
+[444348, 1216, 8, 1],
+[444442, 1216, 11, 3],
+[444498, 1216, 12, 29],
+[444677, 1217, 6, 26],
+[444852, 1217, 12, 18],
+[445012, 1218, 5, 27],
+[445020, 1218, 6, 4],
+[445173, 1218, 11, 4],
+[445242, 1219, 1, 12],
+[445377, 1219, 5, 27],
+[445476, 1219, 9, 3],
+[445545, 1219, 11, 11],
+[445669, 1220, 3, 14],
+[445695, 1220, 4, 9],
+[445708, 1220, 4, 22],
+[445809, 1220, 8, 1],
+[445814, 1220, 8, 6],
+[445967, 1221, 1, 6],
+[445973, 1221, 1, 12],
+[446099, 1221, 5, 18],
+[446254, 1221, 10, 20],
+[446355, 1222, 1, 29],
+[446464, 1222, 5, 18],
+[446602, 1222, 10, 3],
+[446727, 1223, 2, 5],
+[446812, 1223, 5, 1],
+[446898, 1223, 7, 26],
+[447024, 1223, 11, 29],
+[447084, 1224, 1, 28],
+[447254, 1224, 7, 16],
+[447365, 1224, 11, 4],
+[447475, 1225, 2, 22],
+[447668, 1225, 9, 3],
+[447865, 1226, 3, 19],
+[447904, 1226, 4, 27],
+[448059, 1226, 9, 29],
+[448210, 1227, 2, 27],
+[448211, 1227, 2, 28],
+[448280, 1227, 5, 8],
+[448353, 1227, 7, 20],
+[448529, 1228, 1, 12],
+[448558, 1228, 2, 10],
+[448695, 1228, 6, 26],
+[448719, 1228, 7, 20],
+[448772, 1228, 9, 11],
+[448773, 1228, 9, 12],
+[448923, 1229, 2, 9],
+[449085, 1229, 7, 21],
+[449247, 1229, 12, 30],
+[449386, 1230, 5, 18],
+[449483, 1230, 8, 23],
+[449538, 1230, 10, 17],
+[449645, 1231, 2, 1],
+[449686, 1231, 3, 14],
+[449794, 1231, 6, 30],
+[449994, 1232, 1, 16],
+[450139, 1232, 6, 9],
+[450155, 1232, 6, 25],
+[450286, 1232, 11, 3],
+[450406, 1233, 3, 3],
+[450419, 1233, 3, 16],
+[450617, 1233, 9, 30],
+[450744, 1234, 2, 4],
+[450891, 1234, 7, 1],
+[451088, 1235, 1, 14],
+[451186, 1235, 4, 22],
+[451356, 1235, 10, 9],
+[451447, 1236, 1, 8],
+[451566, 1236, 5, 6],
+[451642, 1236, 7, 21],
+[451744, 1236, 10, 31],
+[451874, 1237, 3, 10],
+[451892, 1237, 3, 28],
+[451994, 1237, 7, 8],
+[452148, 1237, 12, 9],
+[452219, 1238, 2, 18],
+[452254, 1238, 3, 25],
+[452374, 1238, 7, 23],
+[452393, 1238, 8, 11],
+[452509, 1238, 12, 5],
+[452569, 1239, 2, 3],
+[452721, 1239, 7, 5],
+[452902, 1240, 1, 2],
+[452964, 1240, 3, 4],
+[452971, 1240, 3, 11],
+[453150, 1240, 9, 6],
+[453326, 1241, 3, 1],
+[453514, 1241, 9, 5],
+[453634, 1242, 1, 3],
+[453829, 1242, 7, 17],
+[453900, 1242, 9, 26],
+[454044, 1243, 2, 17],
+[454126, 1243, 5, 10],
+[454129, 1243, 5, 13],
+[454222, 1243, 8, 14],
+[454326, 1243, 11, 26],
+[454524, 1244, 6, 11],
+[454536, 1244, 6, 23],
+[454680, 1244, 11, 14],
+[454829, 1245, 4, 12],
+[454875, 1245, 5, 28],
+[454974, 1245, 9, 4],
+[454985, 1245, 9, 15],
+[455148, 1246, 2, 25],
+[455192, 1246, 4, 10],
+[455266, 1246, 6, 23],
+[455289, 1246, 7, 16],
+[455291, 1246, 7, 18],
+[455398, 1246, 11, 2],
+[455529, 1247, 3, 13],
+[455614, 1247, 6, 6],
+[455647, 1247, 7, 9],
+[455685, 1247, 8, 16],
+[455794, 1247, 12, 3],
+[455870, 1248, 2, 17],
+[455919, 1248, 4, 6],
+[456077, 1248, 9, 11],
+[456251, 1249, 3, 4],
+[456308, 1249, 4, 30],
+[456489, 1249, 10, 28],
+[456519, 1249, 11, 27],
+[456608, 1250, 2, 24],
+[456647, 1250, 4, 4],
+[456810, 1250, 9, 14],
+[456865, 1250, 11, 8],
+[456995, 1251, 3, 18],
+[457006, 1251, 3, 29],
+[457202, 1251, 10, 11],
+[457265, 1251, 12, 13],
+[457308, 1252, 1, 25],
+[457477, 1252, 7, 12],
+[457504, 1252, 8, 8],
+[457593, 1252, 11, 5],
+[457688, 1253, 2, 8],
+[457707, 1253, 2, 27],
+[457738, 1253, 3, 30],
+[457793, 1253, 5, 24],
+[457960, 1253, 11, 7],
+[457962, 1253, 11, 9],
+[458069, 1254, 2, 24],
+[458115, 1254, 4, 11],
+[458158, 1254, 5, 24],
+[458201, 1254, 7, 6],
+[458205, 1254, 7, 10],
+[458325, 1254, 11, 7],
+[458503, 1255, 5, 4],
+[458543, 1255, 6, 13],
+[458703, 1255, 11, 20],
+[458841, 1256, 4, 6],
+[459027, 1256, 10, 9],
+[459111, 1257, 1, 1],
+[459271, 1257, 6, 10],
+[459456, 1257, 12, 12],
+[459473, 1257, 12, 29],
+[459534, 1258, 2, 28],
+[459698, 1258, 8, 11],
+[459782, 1258, 11, 3],
+[459831, 1258, 12, 22],
+[459921, 1259, 3, 22],
+[460083, 1259, 8, 31],
+[460102, 1259, 9, 19],
+[460256, 1260, 2, 20],
+[460295, 1260, 3, 30],
+[460446, 1260, 8, 28],
+[460461, 1260, 9, 12],
+[460469, 1260, 9, 20],
+[460474, 1260, 9, 25],
+[460628, 1261, 2, 26],
+[460677, 1261, 4, 16],
+[460687, 1261, 4, 26],
+[460690, 1261, 4, 29],
+[460722, 1261, 5, 31],
+[460915, 1261, 12, 10],
+[461062, 1262, 5, 6],
+[461090, 1262, 6, 3],
+[461171, 1262, 8, 23],
+[461196, 1262, 9, 17],
+[461255, 1262, 11, 15],
+[461402, 1263, 4, 11],
+[461563, 1263, 9, 19],
+[461711, 1264, 2, 14],
+[461846, 1264, 6, 28],
+[461945, 1264, 10, 5],
+[462137, 1265, 4, 15],
+[462192, 1265, 6, 9],
+[462266, 1265, 8, 22],
+[462320, 1265, 10, 15],
+[462482, 1266, 3, 26],
+[462500, 1266, 4, 13],
+[462695, 1266, 10, 25],
+[462853, 1267, 4, 1],
+[462981, 1267, 8, 7],
+[463181, 1268, 2, 23],
+[463340, 1268, 7, 31],
+[463434, 1268, 11, 2],
+[463561, 1269, 3, 9],
+[463734, 1269, 8, 29],
+[463925, 1270, 3, 8],
+[463951, 1270, 4, 3],
+[464104, 1270, 9, 3],
+[464201, 1270, 12, 9],
+[464303, 1271, 3, 21],
+[464476, 1271, 9, 10],
+[464643, 1272, 2, 24],
+[464834, 1272, 9, 2],
+[464990, 1273, 2, 5],
+[465130, 1273, 6, 25],
+[465315, 1273, 12, 27],
+[465350, 1274, 1, 31],
+[465437, 1274, 4, 28],
+[465482, 1274, 6, 12],
+[465557, 1274, 8, 26],
+[465735, 1275, 2, 20],
+[465935, 1275, 9, 8],
+[466005, 1275, 11, 17],
+[466185, 1276, 5, 15],
+[466289, 1276, 8, 27],
+[466345, 1276, 10, 22],
+[466470, 1277, 2, 24],
+[466561, 1277, 5, 26],
+[466680, 1277, 9, 22],
+[466850, 1278, 3, 11],
+[466958, 1278, 6, 27],
+[467106, 1278, 11, 22],
+[467113, 1278, 11, 29],
+[467187, 1279, 2, 11],
+[467346, 1279, 7, 20],
+[467508, 1279, 12, 29],
+[467540, 1280, 1, 30],
+[467689, 1280, 6, 27],
+[467803, 1280, 10, 19],
+[467932, 1281, 2, 25],
+[467953, 1281, 3, 18],
+[468028, 1281, 6, 1],
+[468224, 1281, 12, 14],
+[468393, 1282, 6, 1],
+[468488, 1282, 9, 4],
+[468504, 1282, 9, 20],
+[468551, 1282, 11, 6],
+[468631, 1283, 1, 25],
+[468702, 1283, 4, 6],
+[468877, 1283, 9, 28],
+[468982, 1284, 1, 11],
+[469141, 1284, 6, 18],
+[469150, 1284, 6, 27],
+[469216, 1284, 9, 1],
+[469284, 1284, 11, 8],
+[469471, 1285, 5, 14],
+[469603, 1285, 9, 23],
+[469630, 1285, 10, 20],
+[469671, 1285, 11, 30],
+[469863, 1286, 6, 10],
+[469903, 1286, 7, 20],
+[470035, 1286, 11, 29],
+[470222, 1287, 6, 4],
+[470330, 1287, 9, 20],
+[470488, 1288, 2, 25],
+[470657, 1288, 8, 12],
+[470775, 1288, 12, 8],
+[470919, 1289, 5, 1],
+[470950, 1289, 6, 1],
+[470978, 1289, 6, 29],
+[471032, 1289, 8, 22],
+[471186, 1290, 1, 23],
+[471260, 1290, 4, 7],
+[471459, 1290, 10, 23],
+[471630, 1291, 4, 12],
+[471778, 1291, 9, 7],
+[471977, 1292, 3, 24],
+[472167, 1292, 9, 30],
+[472269, 1293, 1, 10],
+[472378, 1293, 4, 29],
+[472482, 1293, 8, 11],
+[472620, 1293, 12, 27],
+[472640, 1294, 1, 16],
+[472822, 1294, 7, 17],
+[472840, 1294, 8, 4],
+[472994, 1295, 1, 5],
+[473081, 1295, 4, 2],
+[473159, 1295, 6, 19],
+[473214, 1295, 8, 13],
+[473309, 1295, 11, 16],
+[473486, 1296, 5, 11],
+[473657, 1296, 10, 29],
+[473682, 1296, 11, 23],
+[473825, 1297, 4, 15],
+[473961, 1297, 8, 29],
+[474108, 1298, 1, 23],
+[474193, 1298, 4, 18],
+[474243, 1298, 6, 7],
+[474279, 1298, 7, 13],
+[474299, 1298, 8, 2],
+[474459, 1299, 1, 9],
+[474525, 1299, 3, 16],
+[474685, 1299, 8, 23],
+[474694, 1299, 9, 1],
+[474742, 1299, 10, 19],
+[474854, 1300, 2, 8],
+[474944, 1300, 5, 9],
+[475070, 1300, 9, 12],
+[475254, 1301, 3, 15],
+[475441, 1301, 9, 18],
+[475573, 1302, 1, 28],
+[475749, 1302, 7, 23],
+[475935, 1303, 1, 25],
+[476007, 1303, 4, 7],
+[476162, 1303, 9, 9],
+[476174, 1303, 9, 21],
+[476357, 1304, 3, 22],
+[476539, 1304, 9, 20],
+[476609, 1304, 11, 29],
+[476703, 1305, 3, 3],
+[476815, 1305, 6, 23],
+[476990, 1305, 12, 15],
+[477007, 1306, 1, 1],
+[477144, 1306, 5, 18],
+[477190, 1306, 7, 3],
+[477375, 1307, 1, 4],
+[477475, 1307, 4, 14],
+[477498, 1307, 5, 7],
+[477544, 1307, 6, 22],
+[477656, 1307, 10, 12],
+[477838, 1308, 4, 11],
+[478032, 1308, 10, 22],
+[478148, 1309, 2, 15],
+[478316, 1309, 8, 2],
+[478416, 1309, 11, 10],
+[478474, 1310, 1, 7],
+[478667, 1310, 7, 19],
+[478681, 1310, 8, 2],
+[478714, 1310, 9, 4],
+[478723, 1310, 9, 13],
+[478826, 1310, 12, 25],
+[478956, 1311, 5, 4],
+[479065, 1311, 8, 21],
+[479226, 1312, 1, 29],
+[479412, 1312, 8, 2],
+[479552, 1312, 12, 20],
+[479679, 1313, 4, 26],
+[479786, 1313, 8, 11],
+[479834, 1313, 9, 28],
+[480005, 1314, 3, 18],
+[480055, 1314, 5, 7],
+[480143, 1314, 8, 3],
+[480278, 1314, 12, 16],
+[480391, 1315, 4, 8],
+[480399, 1315, 4, 16],
+[480558, 1315, 9, 22],
+[480595, 1315, 10, 29],
+[480780, 1316, 5, 1],
+[480865, 1316, 7, 25],
+[480965, 1316, 11, 2],
+[481132, 1317, 4, 18],
+[481209, 1317, 7, 4],
+[481406, 1318, 1, 17],
+[481522, 1318, 5, 13],
+[481633, 1318, 9, 1],
+[481821, 1319, 3, 8],
+[481830, 1319, 3, 17],
+[481922, 1319, 6, 17],
+[482104, 1319, 12, 16],
+[482238, 1320, 4, 28],
+[482405, 1320, 10, 12],
+[482522, 1321, 2, 6],
+[482575, 1321, 3, 31],
+[482693, 1321, 7, 27],
+[482734, 1321, 9, 6],
+[482870, 1322, 1, 20],
+[482956, 1322, 4, 16],
+[483063, 1322, 8, 1],
+[483174, 1322, 11, 20],
+[483351, 1323, 5, 16],
+[483394, 1323, 6, 28],
+[483415, 1323, 7, 19],
+[483579, 1323, 12, 30],
+[483611, 1324, 1, 31],
+[483699, 1324, 4, 28],
+[483741, 1324, 6, 9],
+[483922, 1324, 12, 7],
+[483996, 1325, 2, 19],
+[484196, 1325, 9, 7],
+[484383, 1326, 3, 13],
+[484430, 1326, 4, 29],
+[484448, 1326, 5, 17],
+[484607, 1326, 10, 23],
+[484799, 1327, 5, 3],
+[484937, 1327, 9, 18],
+[485001, 1327, 11, 21],
+[485194, 1328, 6, 1],
+[485199, 1328, 6, 6],
+[485226, 1328, 7, 3],
+[485279, 1328, 8, 25],
+[485427, 1329, 1, 20],
+[485611, 1329, 7, 23],
+[485622, 1329, 8, 3],
+[485668, 1329, 9, 18],
+[485681, 1329, 10, 1],
+[485729, 1329, 11, 18],
+[485873, 1330, 4, 11],
+[486014, 1330, 8, 30],
+[486127, 1330, 12, 21],
+[486307, 1331, 6, 19],
+[486415, 1331, 10, 5],
+[486515, 1332, 1, 13],
+[486700, 1332, 7, 16],
+[486717, 1332, 8, 2],
+[486726, 1332, 8, 11],
+[486891, 1333, 1, 23],
+[487034, 1333, 6, 15],
+[487055, 1333, 7, 6],
+[487148, 1333, 10, 7],
+[487334, 1334, 4, 11],
+[487404, 1334, 6, 20],
+[487432, 1334, 7, 18],
+[487446, 1334, 8, 1],
+[487618, 1335, 1, 20],
+[487741, 1335, 5, 23],
+[487925, 1335, 11, 23],
+[488107, 1336, 5, 23],
+[488298, 1336, 11, 30],
+[488307, 1336, 12, 9],
+[488321, 1336, 12, 23],
+[488430, 1337, 4, 11],
+[488517, 1337, 7, 7],
+[488651, 1337, 11, 18],
+[488770, 1338, 3, 17],
+[488904, 1338, 7, 29],
+[488927, 1338, 8, 21],
+[489121, 1339, 3, 3],
+[489200, 1339, 5, 21],
+[489233, 1339, 6, 23],
+[489306, 1339, 9, 4],
+[489436, 1340, 1, 12],
+[489567, 1340, 5, 22],
+[489706, 1340, 10, 8],
+[489728, 1340, 10, 30],
+[489733, 1340, 11, 4],
+[489891, 1341, 4, 11],
+[489944, 1341, 6, 3],
+[489951, 1341, 6, 10],
+[489990, 1341, 7, 19],
+[490066, 1341, 10, 3],
+[490226, 1342, 3, 12],
+[490232, 1342, 3, 18],
+[490398, 1342, 8, 31],
+[490531, 1343, 1, 11],
+[490685, 1343, 6, 14],
+[490869, 1343, 12, 15],
+[490988, 1344, 4, 12],
+[491150, 1344, 9, 21],
+[491181, 1344, 10, 22],
+[491218, 1344, 11, 28],
+[491228, 1344, 12, 8],
+[491242, 1344, 12, 22],
+[491386, 1345, 5, 15],
+[491421, 1345, 6, 19],
+[491520, 1345, 9, 26],
+[491653, 1346, 2, 6],
+[491765, 1346, 5, 29],
+[491911, 1346, 10, 22],
+[492055, 1347, 3, 15],
+[492237, 1347, 9, 13],
+[492376, 1348, 1, 30],
+[492496, 1348, 5, 29],
+[492601, 1348, 9, 11],
+[492799, 1349, 3, 28],
+[492802, 1349, 3, 31],
+[492926, 1349, 8, 2],
+[493022, 1349, 11, 6],
+[493169, 1350, 4, 2],
+[493237, 1350, 6, 9],
+[493417, 1350, 12, 6],
+[493425, 1350, 12, 14],
+[493580, 1351, 5, 18],
+[493693, 1351, 9, 8],
+[493783, 1351, 12, 7],
+[493856, 1352, 2, 18],
+[493922, 1352, 4, 24],
+[494108, 1352, 10, 27],
+[494284, 1353, 4, 21],
+[494381, 1353, 7, 27],
+[494430, 1353, 9, 14],
+[494536, 1353, 12, 29],
+[494640, 1354, 4, 12],
+[494785, 1354, 9, 4],
+[494938, 1355, 2, 4],
+[494976, 1355, 3, 14],
+[495142, 1355, 8, 27],
+[495267, 1355, 12, 30],
+[495452, 1356, 7, 2],
+[495575, 1356, 11, 2],
+[495637, 1357, 1, 3],
+[495789, 1357, 6, 4],
+[495848, 1357, 8, 2],
+[495853, 1357, 8, 7],
+[495952, 1357, 11, 14],
+[496004, 1358, 1, 5],
+[496137, 1358, 5, 18],
+[496155, 1358, 6, 5],
+[496228, 1358, 8, 17],
+[496373, 1359, 1, 9],
+[496438, 1359, 3, 15],
+[496630, 1359, 9, 23],
+[496694, 1359, 11, 26],
+[496820, 1360, 3, 31],
+[497001, 1360, 9, 28],
+[497065, 1360, 12, 1],
+[497242, 1361, 5, 27],
+[497441, 1361, 12, 12],
+[497639, 1362, 6, 28],
+[497742, 1362, 10, 9],
+[497788, 1362, 11, 24],
+[497960, 1363, 5, 15],
+[498037, 1363, 7, 31],
+[498152, 1363, 11, 23],
+[498187, 1363, 12, 28],
+[498195, 1364, 1, 5],
+[498205, 1364, 1, 15],
+[498229, 1364, 2, 8],
+[498371, 1364, 6, 29],
+[498466, 1364, 10, 2],
+[498568, 1365, 1, 12],
+[498580, 1365, 1, 24],
+[498771, 1365, 8, 3],
+[498782, 1365, 8, 14],
+[498942, 1366, 1, 21],
+[499100, 1366, 6, 28],
+[499199, 1366, 10, 5],
+[499258, 1366, 12, 3],
+[499417, 1367, 5, 11],
+[499521, 1367, 8, 23],
+[499528, 1367, 8, 30],
+[499640, 1367, 12, 20],
+[499645, 1367, 12, 25],
+[499698, 1368, 2, 16],
+[499814, 1368, 6, 11],
+[499970, 1368, 11, 14],
+[500016, 1368, 12, 30],
+[500065, 1369, 2, 17],
+[500231, 1369, 8, 2],
+[500286, 1369, 9, 26],
+[500404, 1370, 1, 22],
+[500486, 1370, 4, 14],
+[500667, 1370, 10, 12],
+[500798, 1371, 2, 20],
+[500824, 1371, 3, 18],
+[500986, 1371, 8, 27],
+[501151, 1372, 2, 8],
+[501323, 1372, 7, 29],
+[501496, 1373, 1, 18],
+[501580, 1373, 4, 12],
+[501684, 1373, 7, 25],
+[501764, 1373, 10, 13],
+[501810, 1373, 11, 28],
+[501893, 1374, 2, 19],
+[501954, 1374, 4, 21],
+[502011, 1374, 6, 17],
+[502101, 1374, 9, 15],
+[502110, 1374, 9, 24],
+[502163, 1374, 11, 16],
+[502317, 1375, 4, 19],
+[502496, 1375, 10, 15],
+[502550, 1375, 12, 8],
+[502570, 1375, 12, 28],
+[502767, 1376, 7, 12],
+[502944, 1377, 1, 5],
+[503082, 1377, 5, 23],
+[503244, 1377, 11, 1],
+[503401, 1378, 4, 7],
+[503587, 1378, 10, 10],
+[503767, 1379, 4, 8],
+[503909, 1379, 8, 28],
+[504060, 1380, 1, 26],
+[504136, 1380, 4, 11],
+[504280, 1380, 9, 2],
+[504347, 1380, 11, 8],
+[504501, 1381, 4, 11],
+[504590, 1381, 7, 9],
+[504643, 1381, 8, 31],
+[504645, 1381, 9, 2],
+[504770, 1382, 1, 5],
+[504954, 1382, 7, 8],
+[505143, 1383, 1, 13],
+[505166, 1383, 2, 5],
+[505253, 1383, 5, 3],
+[505282, 1383, 6, 1],
+[505415, 1383, 10, 12],
+[505521, 1384, 1, 26],
+[505719, 1384, 8, 11],
+[505888, 1385, 1, 27],
+[506078, 1385, 8, 5],
+[506089, 1385, 8, 16],
+[506282, 1386, 2, 25],
+[506346, 1386, 4, 30],
+[506413, 1386, 7, 6],
+[506562, 1386, 12, 2],
+[506741, 1387, 5, 30],
+[506835, 1387, 9, 1],
+[506919, 1387, 11, 24],
+[506960, 1388, 1, 4],
+[507060, 1388, 4, 13],
+[507225, 1388, 9, 25],
+[507289, 1388, 11, 28],
+[507446, 1389, 5, 4],
+[507503, 1389, 6, 30],
+[507609, 1389, 10, 14],
+[507634, 1389, 11, 8],
+[507783, 1390, 4, 6],
+[507789, 1390, 4, 12],
+[507982, 1390, 10, 22],
+[508101, 1391, 2, 18],
+[508203, 1391, 5, 31],
+[508270, 1391, 8, 6],
+[508326, 1391, 10, 1],
+[508449, 1392, 2, 1],
+[508520, 1392, 4, 12],
+[508695, 1392, 10, 4],
+[508728, 1392, 11, 6],
+[508909, 1393, 5, 6],
+[509040, 1393, 9, 14],
+[509176, 1394, 1, 28],
+[509178, 1394, 1, 30],
+[509216, 1394, 3, 9],
+[509306, 1394, 6, 7],
+[509310, 1394, 6, 11],
+[509482, 1394, 11, 30],
+[509636, 1395, 5, 3],
+[509788, 1395, 10, 2],
+[509804, 1395, 10, 18],
+[509976, 1396, 4, 7],
+[510054, 1396, 6, 24],
+[510139, 1396, 9, 17],
+[510316, 1397, 3, 13],
+[510336, 1397, 4, 2],
+[510348, 1397, 4, 14],
+[510527, 1397, 10, 10],
+[510613, 1398, 1, 4],
+[510777, 1398, 6, 17],
+[510956, 1398, 12, 13],
+[510990, 1399, 1, 16],
+[511123, 1399, 5, 29],
+[511247, 1399, 9, 30],
+[511308, 1399, 11, 30],
+[511369, 1400, 1, 30],
+[511546, 1400, 7, 26],
+[511609, 1400, 9, 27],
+[511702, 1400, 12, 29],
+[511793, 1401, 3, 30],
+[511854, 1401, 5, 30],
+[511878, 1401, 6, 23],
+[511944, 1401, 8, 28],
+[512130, 1402, 3, 2],
+[512205, 1402, 5, 16],
+[512354, 1402, 10, 12],
+[512502, 1403, 3, 9],
+[512663, 1403, 8, 17],
+[512819, 1404, 1, 20],
+[512875, 1404, 3, 16],
+[512921, 1404, 5, 1],
+[513118, 1404, 11, 14],
+[513175, 1405, 1, 10],
+[513227, 1405, 3, 3],
+[513384, 1405, 8, 7],
+[513555, 1406, 1, 25],
+[513623, 1406, 4, 3],
+[513653, 1406, 5, 3],
+[513709, 1406, 6, 28],
+[513885, 1406, 12, 21],
+[514081, 1407, 7, 5],
+[514173, 1407, 10, 5],
+[514320, 1408, 2, 29],
+[514413, 1408, 6, 1],
+[514603, 1408, 12, 8],
+[514638, 1409, 1, 12],
+[514709, 1409, 3, 24],
+[514735, 1409, 4, 19],
+[514863, 1409, 8, 25],
+[514901, 1409, 10, 2],
+[515000, 1410, 1, 9],
+[515026, 1410, 2, 4],
+[515092, 1410, 4, 11],
+[515140, 1410, 5, 29],
+[515231, 1410, 8, 28],
+[515311, 1410, 11, 16],
+[515368, 1411, 1, 12],
+[515408, 1411, 2, 21],
+[515551, 1411, 7, 14],
+[515624, 1411, 9, 25],
+[515700, 1411, 12, 10],
+[515756, 1412, 2, 4],
+[515802, 1412, 3, 21],
+[515943, 1412, 8, 9],
+[516065, 1412, 12, 9],
+[516249, 1413, 6, 11],
+[516402, 1413, 11, 11],
+[516430, 1413, 12, 9],
+[516511, 1414, 2, 28],
+[516668, 1414, 8, 4],
+[516682, 1414, 8, 18],
+[516850, 1415, 2, 2],
+[516875, 1415, 2, 27],
+[516913, 1415, 4, 6],
+[517096, 1415, 10, 6],
+[517278, 1416, 4, 5],
+[517314, 1416, 5, 11],
+[517388, 1416, 7, 24],
+[517419, 1416, 8, 24],
+[517556, 1417, 1, 8],
+[517676, 1417, 5, 8],
+[517844, 1417, 10, 23],
+[517917, 1418, 1, 4],
+[518013, 1418, 4, 10],
+[518094, 1418, 6, 30],
+[518156, 1418, 8, 31],
+[518185, 1418, 9, 29],
+[518252, 1418, 12, 5],
+[518388, 1419, 4, 20],
+[518390, 1419, 4, 22],
+[518508, 1419, 8, 18],
+[518651, 1420, 1, 8],
+[518695, 1420, 2, 21],
+[518841, 1420, 7, 16],
+[518852, 1420, 7, 27],
+[519041, 1421, 2, 1],
+[519052, 1421, 2, 12],
+[519118, 1421, 4, 19],
+[519313, 1421, 10, 31],
+[519438, 1422, 3, 5],
+[519513, 1422, 5, 19],
+[519602, 1422, 8, 16],
+[519650, 1422, 10, 3],
+[519817, 1423, 3, 19],
+[519892, 1423, 6, 2],
+[520047, 1423, 11, 4],
+[520177, 1424, 3, 13],
+[520178, 1424, 3, 14],
+[520293, 1424, 7, 7],
+[520318, 1424, 8, 1],
+[520342, 1424, 8, 25],
+[520385, 1424, 10, 7],
+[520555, 1425, 3, 26],
+[520669, 1425, 7, 18],
+[520846, 1426, 1, 11],
+[520921, 1426, 3, 27],
+[521020, 1426, 7, 4],
+[521182, 1426, 12, 13],
+[521244, 1427, 2, 13],
+[521354, 1427, 6, 3],
+[521439, 1427, 8, 27],
+[521506, 1427, 11, 2],
+[521527, 1427, 11, 23],
+[521585, 1428, 1, 20],
+[521691, 1428, 5, 5],
+[521780, 1428, 8, 2],
+[521941, 1429, 1, 10],
+[521984, 1429, 2, 22],
+[522025, 1429, 4, 4],
+[522054, 1429, 5, 3],
+[522119, 1429, 7, 7],
+[522294, 1429, 12, 29],
+[522477, 1430, 6, 30],
+[522614, 1430, 11, 14],
+[522763, 1431, 4, 12],
+[522921, 1431, 9, 17],
+[523032, 1432, 1, 6],
+[523074, 1432, 2, 17],
+[523247, 1432, 8, 8],
+[523422, 1433, 1, 30],
+[523474, 1433, 3, 23],
+[523565, 1433, 6, 22],
+[523600, 1433, 7, 27],
+[523633, 1433, 8, 29],
+[523666, 1433, 10, 1],
+[523768, 1434, 1, 11],
+[523939, 1434, 7, 1],
+[523979, 1434, 8, 10],
+[524053, 1434, 10, 23],
+[524133, 1435, 1, 11],
+[524297, 1435, 6, 24],
+[524354, 1435, 8, 20],
+[524478, 1435, 12, 22],
+[524504, 1436, 1, 17],
+[524534, 1436, 2, 16],
+[524661, 1436, 6, 22],
+[524718, 1436, 8, 18],
+[524837, 1436, 12, 15],
+[524874, 1437, 1, 21],
+[524889, 1437, 2, 5],
+[525011, 1437, 6, 7],
+[525069, 1437, 8, 4],
+[525222, 1438, 1, 4],
+[525252, 1438, 2, 3],
+[525420, 1438, 7, 21],
+[525569, 1438, 12, 17],
+[525585, 1439, 1, 2],
+[525614, 1439, 1, 31],
+[525799, 1439, 8, 4],
+[525920, 1439, 12, 3],
+[526008, 1440, 2, 29],
+[526117, 1440, 6, 17],
+[526175, 1440, 8, 14],
+[526328, 1441, 1, 14],
+[526365, 1441, 2, 20],
+[526441, 1441, 5, 7],
+[526590, 1441, 10, 3],
+[526679, 1441, 12, 31],
+[526789, 1442, 4, 20],
+[526938, 1442, 9, 16],
+[526956, 1442, 10, 4],
+[527084, 1443, 2, 9],
+[527096, 1443, 2, 21],
+[527135, 1443, 4, 1],
+[527257, 1443, 8, 1],
+[527452, 1444, 2, 12],
+[527506, 1444, 4, 6],
+[527574, 1444, 6, 13],
+[527593, 1444, 7, 2],
+[527768, 1444, 12, 24],
+[527869, 1445, 4, 4],
+[527961, 1445, 7, 5],
+[528126, 1445, 12, 17],
+[528168, 1446, 1, 28],
+[528272, 1446, 5, 12],
+[528412, 1446, 9, 29],
+[528572, 1447, 3, 8],
+[528576, 1447, 3, 12],
+[528712, 1447, 7, 26],
+[528866, 1447, 12, 27],
+[528896, 1448, 1, 26],
+[529060, 1448, 7, 8],
+[529249, 1449, 1, 13],
+[529265, 1449, 1, 29],
+[529391, 1449, 6, 4],
+[529504, 1449, 9, 25],
+[529596, 1449, 12, 26],
+[529682, 1450, 3, 22],
+[529850, 1450, 9, 6],
+[529913, 1450, 11, 8],
+[530019, 1451, 2, 22],
+[530177, 1451, 7, 30],
+[530213, 1451, 9, 4],
+[530318, 1451, 12, 18],
+[530424, 1452, 4, 2],
+[530498, 1452, 6, 15],
+[530656, 1452, 11, 20],
+[530854, 1453, 6, 6],
+[531009, 1453, 11, 8],
+[531176, 1454, 4, 24],
+[531217, 1454, 6, 4],
+[531275, 1454, 8, 1],
+[531323, 1454, 9, 18],
+[531337, 1454, 10, 2],
+[531356, 1454, 10, 21],
+[531501, 1455, 3, 15],
+[531671, 1455, 9, 1],
+[531791, 1455, 12, 30],
+[531793, 1456, 1, 1],
+[531873, 1456, 3, 21],
+[531894, 1456, 4, 11],
+[532018, 1456, 8, 13],
+[532056, 1456, 9, 20],
+[532192, 1457, 2, 3],
+[532220, 1457, 3, 3],
+[532319, 1457, 6, 10],
+[532450, 1457, 10, 19],
+[532560, 1458, 2, 6],
+[532567, 1458, 2, 13],
+[532616, 1458, 4, 3],
+[532744, 1458, 8, 9],
+[532928, 1459, 2, 9],
+[533128, 1459, 8, 28],
+[533285, 1460, 2, 1],
+[533325, 1460, 3, 12],
+[533396, 1460, 5, 22],
+[533508, 1460, 9, 11],
+[533522, 1460, 9, 25],
+[533668, 1461, 2, 18],
+[533778, 1461, 6, 8],
+[533793, 1461, 6, 23],
+[533874, 1461, 9, 12],
+[533913, 1461, 10, 21],
+[534090, 1462, 4, 16],
+[534217, 1462, 8, 21],
+[534354, 1463, 1, 5],
+[534409, 1463, 3, 1],
+[534563, 1463, 8, 2],
+[534697, 1463, 12, 14],
+[534875, 1464, 6, 9],
+[534993, 1464, 10, 5],
+[535144, 1465, 3, 5],
+[535300, 1465, 8, 8],
+[535457, 1466, 1, 12],
+[535483, 1466, 2, 7],
+[535554, 1466, 4, 19],
+[535655, 1466, 7, 29],
+[535730, 1466, 10, 12],
+[535821, 1467, 1, 11],
+[536013, 1467, 7, 22],
+[536157, 1467, 12, 13],
+[536271, 1468, 4, 5],
+[536440, 1468, 9, 21],
+[536567, 1469, 1, 26],
+[536748, 1469, 7, 26],
+[536825, 1469, 10, 11],
+[536973, 1470, 3, 8],
+[537039, 1470, 5, 13],
+[537185, 1470, 10, 6],
+[537380, 1471, 4, 19],
+[537545, 1471, 10, 1],
+[537715, 1472, 3, 19],
+[537854, 1472, 8, 5],
+[538019, 1473, 1, 17],
+[538077, 1473, 3, 16],
+[538117, 1473, 4, 25],
+[538205, 1473, 7, 22],
+[538401, 1474, 2, 3],
+[538595, 1474, 8, 16],
+[538794, 1475, 3, 3],
+[538951, 1475, 8, 7],
+[538956, 1475, 8, 12],
+[539022, 1475, 10, 17],
+[539052, 1475, 11, 16],
+[539213, 1476, 4, 25],
+[539276, 1476, 6, 27],
+[539446, 1476, 12, 14],
+[539572, 1477, 4, 19],
+[539695, 1477, 8, 20],
+[539841, 1478, 1, 13],
+[539913, 1478, 3, 26],
+[540003, 1478, 6, 24],
+[540052, 1478, 8, 12],
+[540214, 1479, 1, 21],
+[540378, 1479, 7, 4],
+[540534, 1479, 12, 7],
+[540595, 1480, 2, 6],
+[540745, 1480, 7, 5],
+[540929, 1481, 1, 5],
+[540982, 1481, 2, 27],
+[541021, 1481, 4, 7],
+[541180, 1481, 9, 13],
+[541286, 1481, 12, 28],
+[541391, 1482, 4, 12],
+[541395, 1482, 4, 16],
+[541527, 1482, 8, 26],
+[541559, 1482, 9, 27],
+[541628, 1482, 12, 5],
+[541769, 1483, 4, 25],
+[541840, 1483, 7, 5],
+[542037, 1484, 1, 18],
+[542167, 1484, 5, 27],
+[542293, 1484, 9, 30],
+[542326, 1484, 11, 2],
+[542464, 1485, 3, 20],
+[542489, 1485, 4, 14],
+[542648, 1485, 9, 20],
+[542728, 1485, 12, 9],
+[542744, 1485, 12, 25],
+[542886, 1486, 5, 16],
+[542978, 1486, 8, 16],
+[543039, 1486, 10, 16],
+[543141, 1487, 1, 26],
+[543213, 1487, 4, 8],
+[543336, 1487, 8, 9],
+[543445, 1487, 11, 26],
+[543526, 1488, 2, 15],
+[543656, 1488, 6, 24],
+[543684, 1488, 7, 22],
+[543819, 1488, 12, 4],
+[543933, 1489, 3, 28],
+[543981, 1489, 5, 15],
+[544007, 1489, 6, 10],
+[544074, 1489, 8, 16],
+[544111, 1489, 9, 22],
+[544129, 1489, 10, 10],
+[544303, 1490, 4, 2],
+[544371, 1490, 6, 9],
+[544460, 1490, 9, 6],
+[544606, 1491, 1, 30],
+[544608, 1491, 2, 1],
+[544633, 1491, 2, 26],
+[544790, 1491, 8, 2],
+[544825, 1491, 9, 6],
+[545025, 1492, 3, 24],
+[545186, 1492, 9, 1],
+[545275, 1492, 11, 29],
+[545336, 1493, 1, 29],
+[545424, 1493, 4, 27],
+[545452, 1493, 5, 25],
+[545505, 1493, 7, 17],
+[545640, 1493, 11, 29],
+[545660, 1493, 12, 19],
+[545736, 1494, 3, 5],
+[545871, 1494, 7, 18],
+[546005, 1494, 11, 29],
+[546015, 1494, 12, 9],
+[546171, 1495, 5, 14],
+[546316, 1495, 10, 6],
+[546505, 1496, 4, 12],
+[546576, 1496, 6, 22],
+[546671, 1496, 9, 25],
+[546780, 1497, 1, 12],
+[546818, 1497, 2, 19],
+[546905, 1497, 5, 17],
+[546918, 1497, 5, 30],
+[546933, 1497, 6, 14],
+[547104, 1497, 12, 2],
+[547151, 1498, 1, 18],
+[547194, 1498, 3, 2],
+[547375, 1498, 8, 30],
+[547398, 1498, 9, 22],
+[547475, 1498, 12, 8],
+[547636, 1499, 5, 18],
+[547647, 1499, 5, 29],
+[547826, 1499, 11, 24],
+[547857, 1499, 12, 25],
+[547929, 1500, 3, 7],
+[548025, 1500, 6, 11],
+[548169, 1500, 11, 2],
+[548316, 1501, 3, 29],
+[548399, 1501, 6, 20],
+[548536, 1501, 11, 4],
+[548634, 1502, 2, 10],
+[548825, 1502, 8, 20],
+[548921, 1502, 11, 24],
+[548963, 1503, 1, 5],
+[549051, 1503, 4, 3],
+[549130, 1503, 6, 21],
+[549251, 1503, 10, 20],
+[549259, 1503, 10, 28],
+[549409, 1504, 3, 26],
+[549524, 1504, 7, 19],
+[549723, 1505, 2, 3],
+[549817, 1505, 5, 8],
+[549885, 1505, 7, 15],
+[550044, 1505, 12, 21],
+[550139, 1506, 3, 26],
+[550176, 1506, 5, 2],
+[550271, 1506, 8, 5],
+[550385, 1506, 11, 27],
+[550488, 1507, 3, 10],
+[550594, 1507, 6, 24],
+[550736, 1507, 11, 13],
+[550848, 1508, 3, 4],
+[550888, 1508, 4, 13],
+[551079, 1508, 10, 21],
+[551164, 1509, 1, 14],
+[551272, 1509, 5, 2],
+[551433, 1509, 10, 10],
+[551548, 1510, 2, 2],
+[551581, 1510, 3, 7],
+[551667, 1510, 6, 1],
+[551797, 1510, 10, 9],
+[551938, 1511, 2, 27],
+[551967, 1511, 3, 28],
+[552128, 1511, 9, 5],
+[552229, 1511, 12, 15],
+[552318, 1512, 3, 13],
+[552406, 1512, 6, 9],
+[552513, 1512, 9, 24],
+[552560, 1512, 11, 10],
+[552589, 1512, 12, 9],
+[552654, 1513, 2, 12],
+[552699, 1513, 3, 29],
+[552750, 1513, 5, 19],
+[552865, 1513, 9, 11],
+[552944, 1513, 11, 29],
+[552990, 1514, 1, 14],
+[553149, 1514, 6, 22],
+[553312, 1514, 12, 2],
+[553436, 1515, 4, 5],
+[553476, 1515, 5, 15],
+[553620, 1515, 10, 6],
+[553679, 1515, 12, 4],
+[553691, 1515, 12, 16],
+[553720, 1516, 1, 14],
+[553852, 1516, 5, 25],
+[553874, 1516, 6, 16],
+[553891, 1516, 7, 3],
+[553912, 1516, 7, 24],
+[554029, 1516, 11, 18],
+[554132, 1517, 3, 1],
+[554214, 1517, 5, 22],
+[554384, 1517, 11, 8],
+[554420, 1517, 12, 14],
+[554476, 1518, 2, 8],
+[554536, 1518, 4, 9],
+[554659, 1518, 8, 10],
+[554810, 1519, 1, 8],
+[554879, 1519, 3, 18],
+[555004, 1519, 7, 21],
+[555035, 1519, 8, 21],
+[555232, 1520, 3, 5],
+[555276, 1520, 4, 18],
+[555430, 1520, 9, 19],
+[555589, 1521, 2, 25],
+[555769, 1521, 8, 24],
+[555893, 1521, 12, 26],
+[555928, 1522, 1, 30],
+[555994, 1522, 4, 6],
+[556034, 1522, 5, 16],
+[556046, 1522, 5, 28],
+[556081, 1522, 7, 2],
+[556144, 1522, 9, 3],
+[556184, 1522, 10, 13],
+[556285, 1523, 1, 22],
+[556429, 1523, 6, 15],
+[556567, 1523, 10, 31],
+[556604, 1523, 12, 7],
+[556707, 1524, 3, 19],
+[556866, 1524, 8, 25],
+[556992, 1524, 12, 29],
+[557175, 1525, 6, 30],
+[557265, 1525, 9, 28],
+[557317, 1525, 11, 19],
+[557399, 1526, 2, 9],
+[557504, 1526, 5, 25],
+[557527, 1526, 6, 17],
+[557587, 1526, 8, 16],
+[557783, 1527, 2, 28],
+[557862, 1527, 5, 18],
+[557906, 1527, 7, 1],
+[558062, 1527, 12, 4],
+[558107, 1528, 1, 18],
+[558114, 1528, 1, 25],
+[558138, 1528, 2, 18],
+[558332, 1528, 8, 30],
+[558418, 1528, 11, 24],
+[558483, 1529, 1, 28],
+[558657, 1529, 7, 21],
+[558822, 1530, 1, 2],
+[558892, 1530, 3, 13],
+[559075, 1530, 9, 12],
+[559111, 1530, 10, 18],
+[559258, 1531, 3, 14],
+[559293, 1531, 4, 18],
+[559380, 1531, 7, 14],
+[559390, 1531, 7, 24],
+[559425, 1531, 8, 28],
+[559542, 1531, 12, 23],
+[559661, 1532, 4, 20],
+[559667, 1532, 4, 26],
+[559847, 1532, 10, 23],
+[559946, 1533, 1, 30],
+[560102, 1533, 7, 5],
+[560104, 1533, 7, 7],
+[560144, 1533, 8, 16],
+[560332, 1534, 2, 20],
+[560499, 1534, 8, 6],
+[560631, 1534, 12, 16],
+[560810, 1535, 6, 13],
+[560853, 1535, 7, 26],
+[560951, 1535, 11, 1],
+[561057, 1536, 2, 15],
+[561210, 1536, 7, 17],
+[561352, 1536, 12, 6],
+[561450, 1537, 3, 14],
+[561511, 1537, 5, 14],
+[561650, 1537, 9, 30],
+[561659, 1537, 10, 9],
+[561824, 1538, 3, 23],
+[561847, 1538, 4, 15],
+[562047, 1538, 11, 1],
+[562058, 1538, 11, 12],
+[562070, 1538, 11, 24],
+[562116, 1539, 1, 9],
+[562302, 1539, 7, 14],
+[562378, 1539, 9, 28],
+[562432, 1539, 11, 21],
+[562437, 1539, 11, 26],
+[562582, 1540, 4, 19],
+[562593, 1540, 4, 30],
+[562686, 1540, 8, 1],
+[562777, 1540, 10, 31],
+[562883, 1541, 2, 14],
+[562971, 1541, 5, 13],
+[563061, 1541, 8, 11],
+[563076, 1541, 8, 26],
+[563155, 1541, 11, 13],
+[563315, 1542, 4, 22],
+[563411, 1542, 7, 27],
+[563434, 1542, 8, 19],
+[563543, 1542, 12, 6],
+[563579, 1543, 1, 11],
+[563623, 1543, 2, 24],
+[563676, 1543, 4, 18],
+[563787, 1543, 8, 7],
+[563828, 1543, 9, 17],
+[563840, 1543, 9, 29],
+[564020, 1544, 3, 27],
+[564203, 1544, 9, 26],
+[564377, 1545, 3, 19],
+[564421, 1545, 5, 2],
+[564455, 1545, 6, 5],
+[564605, 1545, 11, 2],
+[564740, 1546, 3, 17],
+[564850, 1546, 7, 5],
+[565033, 1547, 1, 4],
+[565145, 1547, 4, 26],
+[565233, 1547, 7, 23],
+[565349, 1547, 11, 16],
+[565359, 1547, 11, 26],
+[565449, 1548, 2, 24],
+[565640, 1548, 9, 2],
+[565819, 1549, 2, 28],
+[565827, 1549, 3, 8],
+[565874, 1549, 4, 24],
+[566023, 1549, 9, 20],
+[566136, 1550, 1, 11],
+[566250, 1550, 5, 5],
+[566450, 1550, 11, 21],
+[566459, 1550, 11, 30],
+[566506, 1551, 1, 16],
+[566523, 1551, 2, 2],
+[566606, 1551, 4, 26],
+[566612, 1551, 5, 2],
+[566622, 1551, 5, 12],
+[566699, 1551, 7, 28],
+[566850, 1551, 12, 26],
+[567020, 1552, 6, 13],
+[567028, 1552, 6, 21],
+[567154, 1552, 10, 25],
+[567274, 1553, 2, 22],
+[567474, 1553, 9, 10],
+[567574, 1553, 12, 19],
+[567592, 1554, 1, 6],
+[567662, 1554, 3, 17],
+[567770, 1554, 7, 3],
+[567864, 1554, 10, 5],
+[567945, 1554, 12, 25],
+[567999, 1555, 2, 17],
+[568182, 1555, 8, 19],
+[568277, 1555, 11, 22],
+[568462, 1556, 5, 25],
+[568518, 1556, 7, 20],
+[568693, 1557, 1, 11],
+[568832, 1557, 5, 30],
+[568913, 1557, 8, 19],
+[569071, 1558, 1, 24],
+[569085, 1558, 2, 7],
+[569089, 1558, 2, 11],
+[569195, 1558, 5, 28],
+[569320, 1558, 9, 30],
+[569346, 1558, 10, 26],
+[569409, 1558, 12, 28],
+[569439, 1559, 1, 27],
+[569455, 1559, 2, 12],
+[569459, 1559, 2, 16],
+[569518, 1559, 4, 16],
+[569662, 1559, 9, 7],
+[569811, 1560, 2, 3],
+[569970, 1560, 7, 11],
+[570148, 1561, 1, 5],
+[570236, 1561, 4, 3],
+[570286, 1561, 5, 23],
+[570412, 1561, 9, 26],
+[570424, 1561, 10, 8],
+[570609, 1562, 4, 11],
+[570763, 1562, 9, 12],
+[570809, 1562, 10, 28],
+[570923, 1563, 2, 19],
+[571052, 1563, 6, 28],
+[571205, 1563, 11, 28],
+[571252, 1564, 1, 14],
+[571380, 1564, 5, 21],
+[571464, 1564, 8, 13],
+[571645, 1565, 2, 10],
+[571775, 1565, 6, 20],
+[571841, 1565, 8, 25],
+[571848, 1565, 9, 1],
+[571902, 1565, 10, 25],
+[572014, 1566, 2, 14],
+[572104, 1566, 5, 15],
+[572228, 1566, 9, 16],
+[572345, 1567, 1, 11],
+[572358, 1567, 1, 24],
+[572441, 1567, 4, 17],
+[572545, 1567, 7, 30],
+[572706, 1568, 1, 7],
+[572769, 1568, 3, 10],
+[572806, 1568, 4, 16],
+[572883, 1568, 7, 2],
+[572886, 1568, 7, 5],
+[573086, 1569, 1, 21],
+[573091, 1569, 1, 26],
+[573242, 1569, 6, 26],
+[573352, 1569, 10, 14],
+[573508, 1570, 3, 19],
+[573557, 1570, 5, 7],
+[573677, 1570, 9, 4],
+[573864, 1571, 3, 10],
+[573887, 1571, 4, 2],
+[573891, 1571, 4, 6],
+[574013, 1571, 8, 6],
+[574093, 1571, 10, 25],
+[574264, 1572, 4, 13],
+[574331, 1572, 6, 19],
+[574482, 1572, 11, 17],
+[574569, 1573, 2, 12],
+[574750, 1573, 8, 12],
+[574751, 1573, 8, 13],
+[574807, 1573, 10, 8],
+[574968, 1574, 3, 18],
+[575146, 1574, 9, 12],
+[575293, 1575, 2, 6],
+[575400, 1575, 5, 24],
+[575459, 1575, 7, 22],
+[575647, 1576, 1, 26],
+[575783, 1576, 6, 10],
+[575916, 1576, 10, 21],
+[575935, 1576, 11, 9],
+[576012, 1577, 1, 25],
+[576114, 1577, 5, 7],
+[576242, 1577, 9, 12],
+[576378, 1578, 1, 26],
+[576408, 1578, 2, 25],
+[576591, 1578, 8, 27],
+[576599, 1578, 9, 4],
+[576615, 1578, 9, 20],
+[576748, 1579, 1, 31],
+[576937, 1579, 8, 8],
+[577112, 1580, 1, 30],
+[577256, 1580, 6, 22],
+[577356, 1580, 9, 30],
+[577529, 1581, 3, 22],
+[577661, 1581, 8, 1],
+[577720, 1581, 9, 29],
+[577892, 1582, 3, 20],
+[578049, 1582, 8, 24],
+[578096, 1582, 10, 10],
+[578195, 1583, 1, 17],
+[578223, 1583, 2, 14],
+[578293, 1583, 4, 25],
+[578357, 1583, 6, 28],
+[578380, 1583, 7, 21],
+[578570, 1584, 1, 27],
+[578690, 1584, 5, 26],
+[578830, 1584, 10, 13],
+[578992, 1585, 3, 24],
+[579190, 1585, 10, 8],
+[579310, 1586, 2, 5],
+[579376, 1586, 4, 12],
+[579420, 1586, 5, 26],
+[579496, 1586, 8, 10],
+[579603, 1586, 11, 25],
+[579724, 1587, 3, 26],
+[579807, 1587, 6, 17],
+[579813, 1587, 6, 23],
+[580012, 1588, 1, 8],
+[580209, 1588, 7, 23],
+[580360, 1588, 12, 21],
+[580452, 1589, 3, 23],
+[580572, 1589, 7, 21],
+[580650, 1589, 10, 7],
+[580766, 1590, 1, 31],
+[580957, 1590, 8, 10],
+[581011, 1590, 10, 3],
+[581114, 1591, 1, 14],
+[581191, 1591, 4, 1],
+[581308, 1591, 7, 27],
+[581489, 1592, 1, 24],
+[581681, 1592, 8, 3],
+[581862, 1593, 1, 31],
+[581982, 1593, 5, 31],
+[582006, 1593, 6, 24],
+[582184, 1593, 12, 19],
+[582293, 1594, 4, 7],
+[582421, 1594, 8, 13],
+[582562, 1595, 1, 1],
+[582679, 1595, 4, 28],
+[582704, 1595, 5, 23],
+[582896, 1595, 12, 1],
+[583024, 1596, 4, 7],
+[583106, 1596, 6, 28],
+[583291, 1596, 12, 30],
+[583453, 1597, 6, 10],
+[583519, 1597, 8, 15],
+[583649, 1597, 12, 23],
+[583785, 1598, 5, 8],
+[583914, 1598, 9, 14],
+[584084, 1599, 3, 3],
+[584222, 1599, 7, 19],
+[584247, 1599, 8, 13],
+[584446, 1600, 2, 28],
+[584597, 1600, 7, 28],
+[584612, 1600, 8, 12],
+[584666, 1600, 10, 5],
+[584774, 1601, 1, 21],
+[584779, 1601, 1, 26],
+[584861, 1601, 4, 18],
+[584914, 1601, 6, 10],
+[585101, 1601, 12, 14],
+[585153, 1602, 2, 4],
+[585302, 1602, 7, 3],
+[585335, 1602, 8, 5],
+[585505, 1603, 1, 22],
+[585671, 1603, 7, 7],
+[585736, 1603, 9, 10],
+[585913, 1604, 3, 5],
+[585937, 1604, 3, 29],
+[585943, 1604, 4, 4],
+[586108, 1604, 9, 16],
+[586249, 1605, 2, 4],
+[586443, 1605, 8, 17],
+[586498, 1605, 10, 11],
+[586601, 1606, 1, 22],
+[586677, 1606, 4, 8],
+[586733, 1606, 6, 3],
+[586777, 1606, 7, 17],
+[586817, 1606, 8, 26],
+[586863, 1606, 10, 11],
+[586925, 1606, 12, 12],
+[586991, 1607, 2, 16],
+[587150, 1607, 7, 25],
+[587265, 1607, 11, 17],
+[587346, 1608, 2, 6],
+[587415, 1608, 4, 15],
+[587450, 1608, 5, 20],
+[587615, 1608, 11, 1],
+[587645, 1608, 12, 1],
+[587755, 1609, 3, 21],
+[587869, 1609, 7, 13],
+[588007, 1609, 11, 28],
+[588053, 1610, 1, 13],
+[588067, 1610, 1, 27],
+[588092, 1610, 2, 21],
+[588250, 1610, 7, 29],
+[588294, 1610, 9, 11],
+[588400, 1610, 12, 26],
+[588540, 1611, 5, 15],
+[588657, 1611, 9, 9],
+[588735, 1611, 11, 26],
+[588856, 1612, 3, 26],
+[588976, 1612, 7, 24],
+[589087, 1612, 11, 12],
+[589240, 1613, 4, 14],
+[589433, 1613, 10, 24],
+[589609, 1614, 4, 18],
+[589643, 1614, 5, 22],
+[589796, 1614, 10, 22],
+[589950, 1615, 3, 25],
+[590068, 1615, 7, 21],
+[590164, 1615, 10, 25],
+[590346, 1616, 4, 24],
+[590525, 1616, 10, 20],
+[590680, 1617, 3, 24],
+[590828, 1617, 8, 19],
+[590844, 1617, 9, 4],
+[590920, 1617, 11, 19],
+[591114, 1618, 6, 1],
+[591234, 1618, 9, 29],
+[591339, 1619, 1, 12],
+[591457, 1619, 5, 10],
+[591502, 1619, 6, 24],
+[591680, 1619, 12, 19],
+[591708, 1620, 1, 16],
+[591785, 1620, 4, 2],
+[591838, 1620, 5, 25],
+[591890, 1620, 7, 16],
+[592015, 1620, 11, 18],
+[592027, 1620, 11, 30],
+[592183, 1621, 5, 5],
+[592333, 1621, 10, 2],
+[592387, 1621, 11, 25],
+[592562, 1622, 5, 19],
+[592685, 1622, 9, 19],
+[592691, 1622, 9, 25],
+[592717, 1622, 10, 21],
+[592841, 1623, 2, 22],
+[592887, 1623, 4, 9],
+[592898, 1623, 4, 20],
+[592908, 1623, 4, 30],
+[592927, 1623, 5, 19],
+[593104, 1623, 11, 12],
+[593272, 1624, 4, 28],
+[593411, 1624, 9, 14],
+[593529, 1625, 1, 10],
+[593692, 1625, 6, 22],
+[593700, 1625, 6, 30],
+[593855, 1625, 12, 2],
+[594044, 1626, 6, 9],
+[594227, 1626, 12, 9],
+[594253, 1627, 1, 4],
+[594267, 1627, 1, 18],
+[594374, 1627, 5, 5],
+[594483, 1627, 8, 22],
+[594514, 1627, 9, 22],
+[594677, 1628, 3, 3],
+[594873, 1628, 9, 15],
+[595012, 1629, 2, 1],
+[595015, 1629, 2, 4],
+[595054, 1629, 3, 15],
+[595230, 1629, 9, 7],
+[595343, 1629, 12, 29],
+[595355, 1630, 1, 10],
+[595405, 1630, 3, 1],
+[595522, 1630, 6, 26],
+[595525, 1630, 6, 29],
+[595650, 1630, 11, 1],
+[595677, 1630, 11, 28],
+[595762, 1631, 2, 21],
+[595879, 1631, 6, 18],
+[596060, 1631, 12, 16],
+[596140, 1632, 3, 5],
+[596214, 1632, 5, 18],
+[596301, 1632, 8, 13],
+[596381, 1632, 11, 1],
+[596550, 1633, 4, 19],
+[596694, 1633, 9, 10],
+[596759, 1633, 11, 14],
+[596820, 1634, 1, 14],
+[597007, 1634, 7, 20],
+[597169, 1634, 12, 29],
+[597190, 1635, 1, 19],
+[597241, 1635, 3, 11],
+[597313, 1635, 5, 22],
+[597504, 1635, 11, 29],
+[597536, 1635, 12, 31],
+[597656, 1636, 4, 29],
+[597836, 1636, 10, 26],
+[597897, 1636, 12, 26],
+[597995, 1637, 4, 3],
+[598026, 1637, 5, 4],
+[598171, 1637, 9, 26],
+[598324, 1638, 2, 26],
+[598453, 1638, 7, 5],
+[598497, 1638, 8, 18],
+[598504, 1638, 8, 25],
+[598647, 1639, 1, 15],
+[598689, 1639, 2, 26],
+[598787, 1639, 6, 4],
+[598956, 1639, 11, 20],
+[599124, 1640, 5, 6],
+[599163, 1640, 6, 14],
+[599167, 1640, 6, 18],
+[599266, 1640, 9, 25],
+[599385, 1641, 1, 22],
+[599555, 1641, 7, 11],
+[599639, 1641, 10, 3],
+[599734, 1642, 1, 6],
+[599828, 1642, 4, 10],
+[600002, 1642, 10, 1],
+[600022, 1642, 10, 21],
+[600026, 1642, 10, 25],
+[600074, 1642, 12, 12],
+[600169, 1643, 3, 17],
+[600284, 1643, 7, 10],
+[600359, 1643, 9, 23],
+[600429, 1643, 12, 2],
+[600569, 1644, 4, 20],
+[600677, 1644, 8, 6],
+[600722, 1644, 9, 20],
+[600817, 1644, 12, 24],
+[600922, 1645, 4, 8],
+[600940, 1645, 4, 26],
+[600966, 1645, 5, 22],
+[601163, 1645, 12, 5],
+[601297, 1646, 4, 18],
+[601373, 1646, 7, 3],
+[601467, 1646, 10, 5],
+[601560, 1647, 1, 6],
+[601586, 1647, 2, 1],
+[601664, 1647, 4, 20],
+[601731, 1647, 6, 26],
+[601770, 1647, 8, 4],
+[601867, 1647, 11, 9],
+[602033, 1648, 4, 23],
+[602174, 1648, 9, 11],
+[602199, 1648, 10, 6],
+[602289, 1649, 1, 4],
+[602363, 1649, 3, 19],
+[602511, 1649, 8, 14],
+[602572, 1649, 10, 14],
+[602702, 1650, 2, 21],
+[602785, 1650, 5, 15],
+[602832, 1650, 7, 1],
+[602875, 1650, 8, 13],
+[603070, 1651, 2, 24],
+[603202, 1651, 7, 6],
+[603328, 1651, 11, 9],
+[603352, 1651, 12, 3],
+[603506, 1652, 5, 5],
+[603564, 1652, 7, 2],
+[603603, 1652, 8, 10],
+[603768, 1653, 1, 22],
+[603816, 1653, 3, 11],
+[603945, 1653, 7, 18],
+[604106, 1653, 12, 26],
+[604293, 1654, 7, 1],
+[604311, 1654, 7, 19],
+[604470, 1654, 12, 25],
+[604646, 1655, 6, 19],
+[604667, 1655, 7, 10],
+[604669, 1655, 7, 12],
+[604731, 1655, 9, 12],
+[604855, 1656, 1, 14],
+[604960, 1656, 4, 28],
+[605020, 1656, 6, 27],
+[605124, 1656, 10, 9],
+[605211, 1657, 1, 4],
+[605317, 1657, 4, 20],
+[605407, 1657, 7, 19],
+[605463, 1657, 9, 13],
+[605517, 1657, 11, 6],
+[605690, 1658, 4, 28],
+[605701, 1658, 5, 9],
+[605787, 1658, 8, 3],
+[605905, 1658, 11, 29],
+[605956, 1659, 1, 19],
+[606113, 1659, 6, 25],
+[606291, 1659, 12, 20],
+[606466, 1660, 6, 12],
+[606630, 1660, 11, 23],
+[606672, 1661, 1, 4],
+[606784, 1661, 4, 26],
+[606870, 1661, 7, 21],
+[606879, 1661, 7, 30],
+[606960, 1661, 10, 19],
+[607158, 1662, 5, 5],
+[607353, 1662, 11, 16],
+[607370, 1662, 12, 3],
+[607499, 1663, 4, 11],
+[607527, 1663, 5, 9],
+[607549, 1663, 5, 31],
+[607689, 1663, 10, 18],
+[607752, 1663, 12, 20],
+[607928, 1664, 6, 13],
+[608127, 1664, 12, 29],
+[608207, 1665, 3, 19],
+[608311, 1665, 7, 1],
+[608357, 1665, 8, 16],
+[608530, 1666, 2, 5],
+[608641, 1666, 5, 27],
+[608709, 1666, 8, 3],
+[608779, 1666, 10, 12],
+[608880, 1667, 1, 21],
+[609008, 1667, 5, 29],
+[609182, 1667, 11, 19],
+[609226, 1668, 1, 2],
+[609285, 1668, 3, 1],
+[609458, 1668, 8, 21],
+[609619, 1669, 1, 29],
+[609647, 1669, 2, 26],
+[609718, 1669, 5, 8],
+[609898, 1669, 11, 4],
+[609921, 1669, 11, 27],
+[610069, 1670, 4, 24],
+[610108, 1670, 6, 2],
+[610175, 1670, 8, 8],
+[610335, 1671, 1, 15],
+[610528, 1671, 7, 27],
+[610655, 1671, 12, 1],
+[610727, 1672, 2, 11],
+[610734, 1672, 2, 18],
+[610780, 1672, 4, 4],
+[610966, 1672, 10, 7],
+[611032, 1672, 12, 12],
+[611168, 1673, 4, 27],
+[611268, 1673, 8, 5],
+[611404, 1673, 12, 19],
+[611405, 1673, 12, 20],
+[611438, 1674, 1, 22],
+[611633, 1674, 8, 5],
+[611787, 1675, 1, 6],
+[611815, 1675, 2, 3],
+[611907, 1675, 5, 6],
+[611963, 1675, 7, 1],
+[611994, 1675, 8, 1],
+[612192, 1676, 2, 15],
+[612374, 1676, 8, 15],
+[612484, 1676, 12, 3],
+[612664, 1677, 6, 1],
+[612754, 1677, 8, 30],
+[612839, 1677, 11, 23],
+[612849, 1677, 12, 3],
+[612949, 1678, 3, 13],
+[613057, 1678, 6, 29],
+[613187, 1678, 11, 6],
+[613282, 1679, 2, 9],
+[613301, 1679, 2, 28],
+[613415, 1679, 6, 22],
+[613471, 1679, 8, 17],
+[613539, 1679, 10, 24],
+[613716, 1680, 4, 18],
+[613752, 1680, 5, 24],
+[613787, 1680, 6, 28],
+[613967, 1680, 12, 25],
+[613999, 1681, 1, 26],
+[614135, 1681, 6, 11],
+[614285, 1681, 11, 8],
+[614290, 1681, 11, 13],
+[614443, 1682, 4, 15],
+[614529, 1682, 7, 10],
+[614650, 1682, 11, 8],
+[614838, 1683, 5, 15],
+[614879, 1683, 6, 25],
+[614946, 1683, 8, 31],
+[615036, 1683, 11, 29],
+[615091, 1684, 1, 23],
+[615246, 1684, 6, 26],
+[615286, 1684, 8, 5],
+[615345, 1684, 10, 3],
+[615443, 1685, 1, 9],
+[615540, 1685, 4, 16],
+[615694, 1685, 9, 17],
+[615849, 1686, 2, 19],
+[615987, 1686, 7, 7],
+[616138, 1686, 12, 5],
+[616255, 1687, 4, 1],
+[616434, 1687, 9, 27],
+[616624, 1688, 4, 4],
+[616650, 1688, 4, 30],
+[616703, 1688, 6, 22],
+[616752, 1688, 8, 10],
+[616759, 1688, 8, 17],
+[616844, 1688, 11, 10],
+[616929, 1689, 2, 3],
+[617067, 1689, 6, 21],
+[617099, 1689, 7, 23],
+[617182, 1689, 10, 14],
+[617240, 1689, 12, 11],
+[617368, 1690, 4, 18],
+[617444, 1690, 7, 3],
+[617469, 1690, 7, 28],
+[617596, 1690, 12, 2],
+[617726, 1691, 4, 11],
+[617918, 1691, 10, 20],
+[617974, 1691, 12, 15],
+[617998, 1692, 1, 8],
+[618182, 1692, 7, 10],
+[618279, 1692, 10, 15],
+[618337, 1692, 12, 12],
+[618437, 1693, 3, 22],
+[618595, 1693, 8, 27],
+[618789, 1694, 3, 9],
+[618955, 1694, 8, 22],
+[619135, 1695, 2, 18],
+[619274, 1695, 7, 7],
+[619419, 1695, 11, 29],
+[619608, 1696, 6, 5],
+[619670, 1696, 8, 6],
+[619740, 1696, 10, 15],
+[619820, 1697, 1, 3],
+[619917, 1697, 4, 10],
+[619936, 1697, 4, 29],
+[619942, 1697, 5, 5],
+[619987, 1697, 6, 19],
+[620073, 1697, 9, 13],
+[620218, 1698, 2, 5],
+[620316, 1698, 5, 14],
+[620378, 1698, 7, 15],
+[620493, 1698, 11, 7],
+[620602, 1699, 2, 24],
+[620630, 1699, 3, 24],
+[620753, 1699, 7, 25],
+[620898, 1699, 12, 17],
+[620995, 1700, 3, 24],
+[621080, 1700, 6, 17],
+[621232, 1700, 11, 16],
+[621312, 1701, 2, 4],
+[621381, 1701, 4, 14],
+[621481, 1701, 7, 23],
+[621513, 1701, 8, 24],
+[621614, 1701, 12, 3],
+[621666, 1702, 1, 24],
+[621732, 1702, 3, 31],
+[621854, 1702, 7, 31],
+[622030, 1703, 1, 23],
+[622055, 1703, 2, 17],
+[622124, 1703, 4, 27],
+[622290, 1703, 10, 10],
+[622426, 1704, 2, 23],
+[622451, 1704, 3, 19],
+[622522, 1704, 5, 29],
+[622714, 1704, 12, 7],
+[622901, 1705, 6, 12],
+[622938, 1705, 7, 19],
+[622956, 1705, 8, 6],
+[623054, 1705, 11, 12],
+[623227, 1706, 5, 4],
+[623239, 1706, 5, 16],
+[623241, 1706, 5, 18],
+[623346, 1706, 8, 31],
+[623496, 1707, 1, 28],
+[623497, 1707, 1, 29],
+[623690, 1707, 8, 10],
+[623871, 1708, 2, 7],
+[623970, 1708, 5, 16],
+[624064, 1708, 8, 18],
+[624116, 1708, 10, 9],
+[624136, 1708, 10, 29],
+[624252, 1709, 2, 22],
+[624436, 1709, 8, 25],
+[624608, 1710, 2, 13],
+[624699, 1710, 5, 15],
+[624741, 1710, 6, 26],
+[624865, 1710, 10, 28],
+[624952, 1711, 1, 23],
+[625054, 1711, 5, 5],
+[625141, 1711, 7, 31],
+[625333, 1712, 2, 8],
+[625378, 1712, 3, 24],
+[625506, 1712, 7, 30],
+[625514, 1712, 8, 7],
+[625666, 1713, 1, 6],
+[625745, 1713, 3, 26],
+[625872, 1713, 7, 31],
+[625991, 1713, 11, 27],
+[626099, 1714, 3, 15],
+[626108, 1714, 3, 24],
+[626268, 1714, 8, 31],
+[626283, 1714, 9, 15],
+[626385, 1714, 12, 26],
+[626525, 1715, 5, 15],
+[626647, 1715, 9, 14],
+[626779, 1716, 1, 24],
+[626849, 1716, 4, 3],
+[626897, 1716, 5, 21],
+[626952, 1716, 7, 15],
+[627065, 1716, 11, 5],
+[627156, 1717, 2, 4],
+[627308, 1717, 7, 6],
+[627405, 1717, 10, 11],
+[627474, 1717, 12, 19],
+[627548, 1718, 3, 3],
+[627745, 1718, 9, 16],
+[627771, 1718, 10, 12],
+[627948, 1719, 4, 7],
+[628099, 1719, 9, 5],
+[628168, 1719, 11, 13],
+[628254, 1720, 2, 7],
+[628382, 1720, 6, 14],
+[628445, 1720, 8, 16],
+[628560, 1720, 12, 9],
+[628645, 1721, 3, 4],
+[628768, 1721, 7, 5],
+[628868, 1721, 10, 13],
+[628913, 1721, 11, 27],
+[628993, 1722, 2, 15],
+[629006, 1722, 2, 28],
+[629084, 1722, 5, 17],
+[629164, 1722, 8, 5],
+[629229, 1722, 10, 9],
+[629393, 1723, 3, 22],
+[629421, 1723, 4, 19],
+[629592, 1723, 10, 7],
+[629688, 1724, 1, 11],
+[629774, 1724, 4, 6],
+[629926, 1724, 9, 5],
+[630094, 1725, 2, 20],
+[630266, 1725, 8, 11],
+[630461, 1726, 2, 22],
+[630569, 1726, 6, 10],
+[630761, 1726, 12, 19],
+[630918, 1727, 5, 25],
+[631035, 1727, 9, 19],
+[631217, 1728, 3, 19],
+[631283, 1728, 5, 24],
+[631442, 1728, 10, 30],
+[631501, 1728, 12, 28],
+[631549, 1729, 2, 14],
+[631738, 1729, 8, 22],
+[631833, 1729, 11, 25],
+[632004, 1730, 5, 15],
+[632117, 1730, 9, 5],
+[632244, 1731, 1, 10],
+[632443, 1731, 7, 28],
+[632570, 1731, 12, 2],
+[632622, 1732, 1, 23],
+[632679, 1732, 3, 20],
+[632786, 1732, 7, 5],
+[632969, 1733, 1, 4],
+[633139, 1733, 6, 23],
+[633178, 1733, 8, 1],
+[633350, 1734, 1, 20],
+[633424, 1734, 4, 4],
+[633437, 1734, 4, 17],
+[633615, 1734, 10, 12],
+[633737, 1735, 2, 11],
+[633913, 1735, 8, 6],
+[634107, 1736, 2, 16],
+[634226, 1736, 6, 14],
+[634348, 1736, 10, 14],
+[634409, 1736, 12, 14],
+[634440, 1737, 1, 14],
+[634516, 1737, 3, 31],
+[634621, 1737, 7, 14],
+[634722, 1737, 10, 23],
+[634837, 1738, 2, 15],
+[635001, 1738, 7, 29],
+[635082, 1738, 10, 18],
+[635084, 1738, 10, 20],
+[635229, 1739, 3, 14],
+[635307, 1739, 5, 31],
+[635391, 1739, 8, 23],
+[635419, 1739, 9, 20],
+[635512, 1739, 12, 22],
+[635590, 1740, 3, 9],
+[635628, 1740, 4, 16],
+[635808, 1740, 10, 13],
+[635880, 1740, 12, 24],
+[636040, 1741, 6, 2],
+[636066, 1741, 6, 28],
+[636178, 1741, 10, 18],
+[636302, 1742, 2, 19],
+[636451, 1742, 7, 18],
+[636500, 1742, 9, 5],
+[636570, 1742, 11, 14],
+[636727, 1743, 4, 20],
+[636767, 1743, 5, 30],
+[636870, 1743, 9, 10],
+[637061, 1744, 3, 19],
+[637226, 1744, 8, 31],
+[637258, 1744, 10, 2],
+[637407, 1745, 2, 28],
+[637570, 1745, 8, 10],
+[637725, 1746, 1, 12],
+[637876, 1746, 6, 12],
+[637928, 1746, 8, 3],
+[637977, 1746, 9, 21],
+[638143, 1747, 3, 6],
+[638297, 1747, 8, 7],
+[638486, 1748, 2, 12],
+[638614, 1748, 6, 19],
+[638700, 1748, 9, 13],
+[638787, 1748, 12, 9],
+[638926, 1749, 4, 27],
+[639112, 1749, 10, 30],
+[639272, 1750, 4, 8],
+[639433, 1750, 9, 16],
+[639620, 1751, 3, 22],
+[639645, 1751, 4, 16],
+[639665, 1751, 5, 6],
+[639724, 1751, 7, 4],
+[639881, 1751, 12, 8],
+[640032, 1752, 5, 7],
+[640184, 1752, 10, 6],
+[640310, 1753, 2, 9],
+[640430, 1753, 6, 9],
+[640507, 1753, 8, 25],
+[640602, 1753, 11, 28],
+[640775, 1754, 5, 20],
+[640898, 1754, 9, 20],
+[641079, 1755, 3, 20],
+[641150, 1755, 5, 30],
+[641202, 1755, 7, 21],
+[641236, 1755, 8, 24],
+[641318, 1755, 11, 14],
+[641435, 1756, 3, 10],
+[641587, 1756, 8, 9],
+[641745, 1757, 1, 14],
+[641747, 1757, 1, 16],
+[641887, 1757, 6, 5],
+[642035, 1757, 10, 31],
+[642049, 1757, 11, 14],
+[642067, 1757, 12, 2],
+[642241, 1758, 5, 25],
+[642427, 1758, 11, 27],
+[642505, 1759, 2, 13],
+[642604, 1759, 5, 23],
+[642666, 1759, 7, 24],
+[642717, 1759, 9, 13],
+[642810, 1759, 12, 15],
+[642821, 1759, 12, 26],
+[642877, 1760, 2, 20],
+[643039, 1760, 7, 31],
+[643229, 1761, 2, 6],
+[643397, 1761, 7, 24],
+[643429, 1761, 8, 25],
+[643476, 1761, 10, 11],
+[643486, 1761, 10, 21],
+[643565, 1762, 1, 8],
+[643650, 1762, 4, 3],
+[643750, 1762, 7, 12],
+[643889, 1762, 11, 28],
+[644080, 1763, 6, 7],
+[644093, 1763, 6, 20],
+[644223, 1763, 10, 28],
+[644322, 1764, 2, 4],
+[644388, 1764, 4, 10],
+[644572, 1764, 10, 11],
+[644626, 1764, 12, 4],
+[644766, 1765, 4, 23],
+[644773, 1765, 4, 30],
+[644928, 1765, 10, 2],
+[645069, 1766, 2, 20],
+[645247, 1766, 8, 17],
+[645376, 1766, 12, 24],
+[645399, 1767, 1, 16],
+[645508, 1767, 5, 5],
+[645596, 1767, 8, 1],
+[645778, 1768, 1, 30],
+[645876, 1768, 5, 7],
+[645988, 1768, 8, 27],
+[646175, 1769, 3, 2],
+[646255, 1769, 5, 21],
+[646313, 1769, 7, 18],
+[646445, 1769, 11, 27],
+[646514, 1770, 2, 4],
+[646558, 1770, 3, 20],
+[646715, 1770, 8, 24],
+[646771, 1770, 10, 19],
+[646925, 1771, 3, 22],
+[646940, 1771, 4, 6],
+[647015, 1771, 6, 20],
+[647043, 1771, 7, 18],
+[647225, 1772, 1, 16],
+[647425, 1772, 8, 3],
+[647508, 1772, 10, 25],
+[647628, 1773, 2, 22],
+[647712, 1773, 5, 17],
+[647911, 1773, 12, 2],
+[648003, 1774, 3, 4],
+[648140, 1774, 7, 19],
+[648217, 1774, 10, 4],
+[648293, 1774, 12, 19],
+[648381, 1775, 3, 17],
+[648398, 1775, 4, 3],
+[648417, 1775, 4, 22],
+[648480, 1775, 6, 24],
+[648677, 1776, 1, 7],
+[648688, 1776, 1, 18],
+[648819, 1776, 5, 28],
+[648901, 1776, 8, 18],
+[649002, 1776, 11, 27],
+[649075, 1777, 2, 8],
+[649133, 1777, 4, 7],
+[649165, 1777, 5, 9],
+[649175, 1777, 5, 19],
+[649209, 1777, 6, 22],
+[649292, 1777, 9, 13],
+[649409, 1778, 1, 8],
+[649513, 1778, 4, 22],
+[649692, 1778, 10, 18],
+[649836, 1779, 3, 11],
+[649974, 1779, 7, 27],
+[650166, 1780, 2, 4],
+[650334, 1780, 7, 21],
+[650478, 1780, 12, 12],
+[650521, 1781, 1, 24],
+[650569, 1781, 3, 13],
+[650657, 1781, 6, 9],
+[650679, 1781, 7, 1],
+[650837, 1781, 12, 6],
+[650900, 1782, 2, 7],
+[650911, 1782, 2, 18],
+[651087, 1782, 8, 13],
+[651232, 1783, 1, 5],
+[651288, 1783, 3, 2],
+[651421, 1783, 7, 13],
+[651621, 1784, 1, 29],
+[651649, 1784, 2, 26],
+[651776, 1784, 7, 2],
+[651935, 1784, 12, 8],
+[651952, 1784, 12, 25],
+[652132, 1785, 6, 23],
+[652228, 1785, 9, 27],
+[652301, 1785, 12, 9],
+[652398, 1786, 3, 16],
+[652449, 1786, 5, 6],
+[652545, 1786, 8, 10],
+[652616, 1786, 10, 20],
+[652696, 1787, 1, 8],
+[652745, 1787, 2, 26],
+[652913, 1787, 8, 13],
+[652949, 1787, 9, 18],
+[652997, 1787, 11, 5],
+[653051, 1787, 12, 29],
+[653249, 1788, 7, 14],
+[653275, 1788, 8, 9],
+[653368, 1788, 11, 10],
+[653444, 1789, 1, 25],
+[653606, 1789, 7, 6],
+[653803, 1790, 1, 19],
+[653874, 1790, 3, 31],
+[653926, 1790, 5, 22],
+[653979, 1790, 7, 14],
+[654093, 1790, 11, 5],
+[654134, 1790, 12, 16],
+[654232, 1791, 3, 24],
+[654280, 1791, 5, 11],
+[654355, 1791, 7, 25],
+[654455, 1791, 11, 2],
+[654485, 1791, 12, 2],
+[654662, 1792, 5, 27],
+[654723, 1792, 7, 27],
+[654818, 1792, 10, 30],
+[654928, 1793, 2, 17],
+[654995, 1793, 4, 25],
+[655042, 1793, 6, 11],
+[655103, 1793, 8, 11],
+[655264, 1794, 1, 19],
+[655286, 1794, 2, 10],
+[655359, 1794, 4, 24],
+[655426, 1794, 6, 30],
+[655519, 1794, 10, 1],
+[655679, 1795, 3, 10],
+[655755, 1795, 5, 25],
+[655943, 1795, 11, 29],
+[655945, 1795, 12, 1],
+[656034, 1796, 2, 28],
+[656148, 1796, 6, 21],
+[656218, 1796, 8, 30],
+[656221, 1796, 9, 2],
+[656352, 1797, 1, 11],
+[656540, 1797, 7, 18],
+[656634, 1797, 10, 20],
+[656804, 1798, 4, 8],
+[656843, 1798, 5, 17],
+[656875, 1798, 6, 18],
+[656952, 1798, 9, 3],
+[657057, 1798, 12, 17],
+[657156, 1799, 3, 26],
+[657314, 1799, 8, 31],
+[657315, 1799, 9, 1],
+[657476, 1800, 2, 9],
+[657505, 1800, 3, 10],
+[657546, 1800, 4, 20],
+[657703, 1800, 9, 24],
+[657712, 1800, 10, 3],
+[657771, 1800, 12, 1],
+[657926, 1801, 5, 5],
+[657966, 1801, 6, 14],
+[658080, 1801, 10, 6],
+[658259, 1802, 4, 3],
+[658439, 1802, 9, 30],
+[658610, 1803, 3, 20],
+[658628, 1803, 4, 7],
+[658794, 1803, 9, 20],
+[658806, 1803, 10, 2],
+[658969, 1804, 3, 13],
+[659032, 1804, 5, 15],
+[659219, 1804, 11, 18],
+[659262, 1804, 12, 31],
+[659457, 1805, 7, 14],
+[659495, 1805, 8, 21],
+[659659, 1806, 2, 1],
+[659669, 1806, 2, 11],
+[659765, 1806, 5, 18],
+[659932, 1806, 11, 1],
+[660098, 1807, 4, 16],
+[660154, 1807, 6, 11],
+[660262, 1807, 9, 27],
+[660439, 1808, 3, 22],
+[660543, 1808, 7, 4],
+[660548, 1808, 7, 9],
+[660681, 1808, 11, 19],
+[660746, 1809, 1, 23],
+[660793, 1809, 3, 11],
+[660825, 1809, 4, 12],
+[661007, 1809, 10, 11],
+[661177, 1810, 3, 30],
+[661199, 1810, 4, 21],
+[661259, 1810, 6, 20],
+[661452, 1810, 12, 30],
+[661644, 1811, 7, 10],
+[661844, 1812, 1, 26],
+[661934, 1812, 4, 25],
+[662055, 1812, 8, 24],
+[662206, 1813, 1, 22],
+[662363, 1813, 6, 28],
+[662447, 1813, 9, 20],
+[662636, 1814, 3, 28],
+[662666, 1814, 4, 27],
+[662802, 1814, 9, 10],
+[662974, 1815, 3, 1],
+[663146, 1815, 8, 20],
+[663275, 1815, 12, 27],
+[663328, 1816, 2, 18],
+[663451, 1816, 6, 20],
+[663547, 1816, 9, 24],
+[663576, 1816, 10, 23],
+[663604, 1816, 11, 20],
+[663794, 1817, 5, 29],
+[663846, 1817, 7, 20],
+[663897, 1817, 9, 9],
+[663899, 1817, 9, 11],
+[664048, 1818, 2, 7],
+[664145, 1818, 5, 15],
+[664206, 1818, 7, 15],
+[664358, 1818, 12, 14],
+[664550, 1819, 6, 24],
+[664636, 1819, 9, 18],
+[664782, 1820, 2, 11],
+[664919, 1820, 6, 27],
+[664968, 1820, 8, 15],
+[665126, 1821, 1, 20],
+[665298, 1821, 7, 11],
+[665415, 1821, 11, 5],
+[665428, 1821, 11, 18],
+[665617, 1822, 5, 26],
+[665634, 1822, 6, 12],
+[665683, 1822, 7, 31],
+[665729, 1822, 9, 15],
+[665796, 1822, 11, 21],
+[665972, 1823, 5, 16],
+[666069, 1823, 8, 21],
+[666114, 1823, 10, 5],
+[666177, 1823, 12, 7],
+[666337, 1824, 5, 15],
+[666524, 1824, 11, 18],
+[666697, 1825, 5, 10],
+[666782, 1825, 8, 3],
+[666873, 1825, 11, 2],
+[666957, 1826, 1, 25],
+[667032, 1826, 4, 10],
+[667178, 1826, 9, 3],
+[667193, 1826, 9, 18],
+[667386, 1827, 3, 30],
+[667546, 1827, 9, 6],
+[667678, 1828, 1, 16],
+[667722, 1828, 2, 29],
+[667809, 1828, 5, 26],
+[667941, 1828, 10, 5],
+[667983, 1828, 11, 16],
+[668108, 1829, 3, 21],
+[668198, 1829, 6, 19],
+[668247, 1829, 8, 7],
+[668425, 1830, 2, 1],
+[668622, 1830, 8, 17],
+[668746, 1830, 12, 19],
+[668927, 1831, 6, 18],
+[669116, 1831, 12, 24],
+[669234, 1832, 4, 20],
+[669268, 1832, 5, 24],
+[669405, 1832, 10, 8],
+[669499, 1833, 1, 10],
+[669681, 1833, 7, 11],
+[669780, 1833, 10, 18],
+[669933, 1834, 3, 20],
+[670093, 1834, 8, 27],
+[670111, 1834, 9, 14],
+[670225, 1835, 1, 6],
+[670263, 1835, 2, 13],
+[670264, 1835, 2, 14],
+[670436, 1835, 8, 5],
+[670550, 1835, 11, 27],
+[670603, 1836, 1, 19],
+[670737, 1836, 6, 1],
+[670837, 1836, 9, 9],
+[671025, 1837, 3, 16],
+[671127, 1837, 6, 26],
+[671275, 1837, 11, 21],
+[671412, 1838, 4, 7],
+[671416, 1838, 4, 11],
+[671544, 1838, 8, 17],
+[671702, 1839, 1, 22],
+[671814, 1839, 5, 14],
+[671966, 1839, 10, 13],
+[672158, 1840, 4, 22],
+[672358, 1840, 11, 8],
+[672437, 1841, 1, 26],
+[672478, 1841, 3, 8],
+[672561, 1841, 5, 30],
+[672653, 1841, 8, 30],
+[672811, 1842, 2, 4],
+[672977, 1842, 7, 20],
+[673077, 1842, 10, 28],
+[673144, 1843, 1, 3],
+[673193, 1843, 2, 21],
+[673328, 1843, 7, 6],
+[673348, 1843, 7, 26],
+[673395, 1843, 9, 11],
+[673548, 1844, 2, 11],
+[673665, 1844, 6, 7],
+[673863, 1844, 12, 22],
+[674062, 1845, 7, 9],
+[674250, 1846, 1, 13],
+[674315, 1846, 3, 19],
+[674368, 1846, 5, 11],
+[674453, 1846, 8, 4],
+[674549, 1846, 11, 8],
+[674685, 1847, 3, 24],
+[674697, 1847, 4, 5],
+[674784, 1847, 7, 1],
+[674887, 1847, 10, 12],
+[675083, 1848, 4, 25],
+[675267, 1848, 10, 26],
+[675274, 1848, 11, 2],
+[675297, 1848, 11, 25],
+[675333, 1848, 12, 31],
+[675378, 1849, 2, 14],
+[675550, 1849, 8, 5],
+[675694, 1849, 12, 27],
+[675775, 1850, 3, 18],
+[675961, 1850, 9, 20],
+[676069, 1851, 1, 6],
+[676212, 1851, 5, 29],
+[676295, 1851, 8, 20],
+[676451, 1852, 1, 23],
+[676644, 1852, 8, 3],
+[676698, 1852, 9, 26],
+[676795, 1853, 1, 1],
+[676971, 1853, 6, 26],
+[677014, 1853, 8, 8],
+[677080, 1853, 10, 13],
+[677111, 1853, 11, 13],
+[677169, 1854, 1, 10],
+[677195, 1854, 2, 5],
+[677338, 1854, 6, 28],
+[677443, 1854, 10, 11],
+[677640, 1855, 4, 26],
+[677715, 1855, 7, 10],
+[677816, 1855, 10, 19],
+[677823, 1855, 10, 26],
+[677934, 1856, 2, 14],
+[678019, 1856, 5, 9],
+[678154, 1856, 9, 21],
+[678303, 1857, 2, 17],
+[678345, 1857, 3, 31],
+[678410, 1857, 6, 4],
+[678459, 1857, 7, 23],
+[678608, 1857, 12, 19],
+[678793, 1858, 6, 22],
+[678983, 1858, 12, 29],
+[679074, 1859, 3, 30],
+[679163, 1859, 6, 27],
+[679215, 1859, 8, 18],
+[679251, 1859, 9, 23],
+[679280, 1859, 10, 22],
+[679426, 1860, 3, 16],
+[679441, 1860, 3, 31],
+[679610, 1860, 9, 16],
+[679646, 1860, 10, 22],
+[679763, 1861, 2, 16],
+[679873, 1861, 6, 6],
+[680067, 1861, 12, 17],
+[680142, 1862, 3, 2],
+[680249, 1862, 6, 17],
+[680420, 1862, 12, 5],
+[680517, 1863, 3, 12],
+[680541, 1863, 4, 5],
+[680721, 1863, 10, 2],
+[680809, 1863, 12, 29],
+[680888, 1864, 3, 17],
+[680943, 1864, 5, 11],
+[680975, 1864, 6, 12],
+[681040, 1864, 8, 16],
+[681086, 1864, 10, 1],
+[681186, 1865, 1, 9],
+[681198, 1865, 1, 21],
+[681390, 1865, 8, 1],
+[681581, 1866, 2, 8],
+[681653, 1866, 4, 21],
+[681820, 1866, 10, 5],
+[681926, 1867, 1, 19],
+[682064, 1867, 6, 6],
+[682149, 1867, 8, 30],
+[682222, 1867, 11, 11],
+[682307, 1868, 2, 4],
+[682318, 1868, 2, 15],
+[682330, 1868, 2, 27],
+[682385, 1868, 4, 22],
+[682478, 1868, 7, 24],
+[682591, 1868, 11, 14],
+[682787, 1869, 5, 29],
+[682866, 1869, 8, 16],
+[683056, 1870, 2, 22],
+[683183, 1870, 6, 29],
+[683338, 1870, 12, 1],
+[683428, 1871, 3, 1],
+[683617, 1871, 9, 6],
+[683808, 1872, 3, 15],
+[683918, 1872, 7, 3],
+[684116, 1873, 1, 17],
+[684241, 1873, 5, 22],
+[684394, 1873, 10, 22],
+[684529, 1874, 3, 6],
+[684674, 1874, 7, 29],
+[684865, 1875, 2, 5],
+[684994, 1875, 6, 14],
+[685034, 1875, 7, 24],
+[685101, 1875, 9, 29],
+[685111, 1875, 10, 9],
+[685153, 1875, 11, 20],
+[685199, 1876, 1, 5],
+[685271, 1876, 3, 17],
+[685367, 1876, 6, 21],
+[685491, 1876, 10, 23],
+[685561, 1877, 1, 1],
+[685753, 1877, 7, 12],
+[685936, 1878, 1, 11],
+[686107, 1878, 7, 1],
+[686220, 1878, 10, 22],
+[686343, 1879, 2, 22],
+[686491, 1879, 7, 20],
+[686621, 1879, 11, 27],
+[686633, 1879, 12, 9],
+[686795, 1880, 5, 19],
+[686928, 1880, 9, 29],
+[687071, 1881, 2, 19],
+[687180, 1881, 6, 8],
+[687181, 1881, 6, 9],
+[687221, 1881, 7, 19],
+[687227, 1881, 7, 25],
+[687312, 1881, 10, 18],
+[687478, 1882, 4, 2],
+[687677, 1882, 10, 18],
+[687772, 1883, 1, 21],
+[687879, 1883, 5, 8],
+[688020, 1883, 9, 26],
+[688028, 1883, 10, 4],
+[688033, 1883, 10, 9],
+[688119, 1884, 1, 3],
+[688269, 1884, 6, 1],
+[688357, 1884, 8, 28],
+[688439, 1884, 11, 18],
+[688578, 1885, 4, 6],
+[688660, 1885, 6, 27],
+[688752, 1885, 9, 27],
+[688769, 1885, 10, 14],
+[688968, 1886, 5, 1],
+[689166, 1886, 11, 15],
+[689211, 1886, 12, 30],
+[689337, 1887, 5, 5],
+[689352, 1887, 5, 20],
+[689490, 1887, 10, 5],
+[689554, 1887, 12, 8],
+[689574, 1887, 12, 28],
+[689760, 1888, 7, 1],
+[689917, 1888, 12, 5],
+[690038, 1889, 4, 5],
+[690202, 1889, 9, 16],
+[690205, 1889, 9, 19],
+[690222, 1889, 10, 6],
+[690299, 1889, 12, 22],
+[690472, 1890, 6, 13],
+[690508, 1890, 7, 19],
+[690604, 1890, 10, 23],
+[690701, 1891, 1, 28],
+[690860, 1891, 7, 6],
+[691054, 1892, 1, 16],
+[691154, 1892, 4, 25],
+[691288, 1892, 9, 6],
+[691348, 1892, 11, 5],
+[691440, 1893, 2, 5],
+[691538, 1893, 5, 14],
+[691560, 1893, 6, 5],
+[691660, 1893, 9, 13],
+[691694, 1893, 10, 17],
+[691890, 1894, 5, 1],
+[692028, 1894, 9, 16],
+[692073, 1894, 10, 31],
+[692268, 1895, 5, 14],
+[692341, 1895, 7, 26],
+[692448, 1895, 11, 10],
+[692450, 1895, 11, 12],
+[692580, 1896, 3, 21],
+[692662, 1896, 6, 11],
+[692680, 1896, 6, 29],
+[692793, 1896, 10, 20],
+[692917, 1897, 2, 21],
+[692948, 1897, 3, 24],
+[692995, 1897, 5, 10],
+[693024, 1897, 6, 8],
+[693214, 1897, 12, 15],
+[693279, 1898, 2, 18],
+[693388, 1898, 6, 7],
+[693432, 1898, 7, 21],
+[693449, 1898, 8, 7],
+[693613, 1899, 1, 18],
+[693686, 1899, 4, 1],
+[693767, 1899, 6, 21],
+[693890, 1899, 10, 22],
+[693924, 1899, 11, 25],
+[694083, 1900, 5, 3],
+[694139, 1900, 6, 28],
+[694253, 1900, 10, 20],
+[694369, 1901, 2, 13],
+[694561, 1901, 8, 24],
+[694728, 1902, 2, 7],
+[694855, 1902, 6, 14],
+[694942, 1902, 9, 9],
+[695080, 1903, 1, 25],
+[695180, 1903, 5, 5],
+[695269, 1903, 8, 2],
+[695369, 1903, 11, 10],
+[695560, 1904, 5, 19],
+[695570, 1904, 5, 29],
+[695637, 1904, 8, 4],
+[695690, 1904, 9, 26],
+[695854, 1905, 3, 9],
+[695888, 1905, 4, 12],
+[695985, 1905, 7, 18],
+[696007, 1905, 8, 9],
+[696016, 1905, 8, 18],
+[696124, 1905, 12, 4],
+[696159, 1906, 1, 8],
+[696203, 1906, 2, 21],
+[696214, 1906, 3, 4],
+[696323, 1906, 6, 21],
+[696332, 1906, 6, 30],
+[696405, 1906, 9, 11],
+[696585, 1907, 3, 10],
+[696667, 1907, 5, 31],
+[696723, 1907, 7, 26],
+[696804, 1907, 10, 15],
+[696919, 1908, 2, 7],
+[697010, 1908, 5, 8],
+[697048, 1908, 6, 15],
+[697107, 1908, 8, 13],
+[697290, 1909, 2, 12],
+[697471, 1909, 8, 12],
+[697524, 1909, 10, 4],
+[697571, 1909, 11, 20],
+[697723, 1910, 4, 21],
+[697892, 1910, 10, 7],
+[697917, 1910, 11, 1],
+[698001, 1911, 1, 24],
+[698093, 1911, 4, 26],
+[698258, 1911, 10, 8],
+[698268, 1911, 10, 18],
+[698269, 1911, 10, 19],
+[698457, 1912, 4, 24],
+[698628, 1912, 10, 12],
+[698684, 1912, 12, 7],
+[698860, 1913, 6, 1],
+[698919, 1913, 7, 30],
+[698990, 1913, 10, 9],
+[699065, 1913, 12, 23],
+[699161, 1914, 3, 29],
+[699213, 1914, 5, 20],
+[699368, 1914, 10, 22],
+[699523, 1915, 3, 26],
+[699705, 1915, 9, 24],
+[699797, 1915, 12, 25],
+[699881, 1916, 3, 18],
+[699891, 1916, 3, 28],
+[700003, 1916, 7, 18],
+[700158, 1916, 12, 20],
+[700185, 1917, 1, 16],
+[700338, 1917, 6, 18],
+[700426, 1917, 9, 14],
+[700499, 1917, 11, 26],
+[700505, 1917, 12, 2],
+[700663, 1918, 5, 9],
+[700729, 1918, 7, 14],
+[700785, 1918, 9, 8],
+[700807, 1918, 9, 30],
+[700825, 1918, 10, 18],
+[700872, 1918, 12, 4],
+[701045, 1919, 5, 26],
+[701200, 1919, 10, 28],
+[701237, 1919, 12, 4],
+[701411, 1920, 5, 26],
+[701418, 1920, 6, 2],
+[701459, 1920, 7, 13],
+[701592, 1920, 11, 23],
+[701620, 1920, 12, 21],
+[701691, 1921, 3, 2],
+[701807, 1921, 6, 26],
+[701885, 1921, 9, 12],
+[701960, 1921, 11, 26],
+[702029, 1922, 2, 3],
+[702155, 1922, 6, 9],
+[702244, 1922, 9, 6],
+[702291, 1922, 10, 23],
+[702481, 1923, 5, 1],
+[702621, 1923, 9, 18],
+[702821, 1924, 4, 5],
+[702911, 1924, 7, 4],
+[703008, 1924, 10, 9],
+[703133, 1925, 2, 11],
+[703251, 1925, 6, 9],
+[703291, 1925, 7, 19],
+[703418, 1925, 11, 23],
+[703445, 1925, 12, 20],
+[703501, 1926, 2, 14],
+[703569, 1926, 4, 23],
+[703644, 1926, 7, 7],
+[703664, 1926, 7, 27],
+[703697, 1926, 8, 29],
+[703754, 1926, 10, 25],
+[703848, 1927, 1, 27],
+[703965, 1927, 5, 24],
+[704071, 1927, 9, 7],
+[704219, 1928, 2, 2],
+[704235, 1928, 2, 18],
+[704377, 1928, 7, 9],
+[704546, 1928, 12, 25],
+[704581, 1929, 1, 29],
+[704620, 1929, 3, 9],
+[704804, 1929, 9, 9],
+[704833, 1929, 10, 8],
+[705011, 1930, 4, 4],
+[705059, 1930, 5, 22],
+[705062, 1930, 5, 25],
+[705188, 1930, 9, 28],
+[705332, 1931, 2, 19],
+[705513, 1931, 8, 19],
+[705572, 1931, 10, 17],
+[705715, 1932, 3, 8],
+[705836, 1932, 7, 7],
+[705959, 1932, 11, 7],
+[706103, 1933, 3, 31],
+[706246, 1933, 8, 21],
+[706342, 1933, 11, 25],
+[706347, 1933, 11, 30],
+[706439, 1934, 3, 2],
+[706531, 1934, 6, 2],
+[706669, 1934, 10, 18],
+[706763, 1935, 1, 20],
+[706765, 1935, 1, 22],
+[706872, 1935, 5, 9],
+[707027, 1935, 10, 11],
+[707076, 1935, 11, 29],
+[707208, 1936, 4, 9],
+[707244, 1936, 5, 15],
+[707355, 1936, 9, 3],
+[707493, 1937, 1, 19],
+[707524, 1937, 2, 19],
+[707644, 1937, 6, 19],
+[707771, 1937, 10, 24],
+[707916, 1938, 3, 18],
+[708059, 1938, 8, 8],
+[708085, 1938, 9, 3],
+[708229, 1939, 1, 25],
+[708232, 1939, 1, 28],
+[708288, 1939, 3, 25],
+[708469, 1939, 9, 22],
+[708643, 1940, 3, 14],
+[708793, 1940, 8, 11],
+[708954, 1941, 1, 19],
+[709052, 1941, 4, 27],
+[709080, 1941, 5, 25],
+[709154, 1941, 8, 7],
+[709309, 1942, 1, 9],
+[709419, 1942, 4, 29],
+[709426, 1942, 5, 6],
+[709435, 1942, 5, 15],
+[709507, 1942, 7, 26],
+[709535, 1942, 8, 23],
+[709563, 1942, 9, 20],
+[709662, 1942, 12, 28],
+[709835, 1943, 6, 19],
+[709987, 1943, 11, 18],
+[710131, 1944, 4, 10],
+[710164, 1944, 5, 13],
+[710205, 1944, 6, 23],
+[710214, 1944, 7, 2],
+[710279, 1944, 9, 5],
+[710319, 1944, 10, 15],
+[710400, 1945, 1, 4],
+[710521, 1945, 5, 5],
+[710529, 1945, 5, 13],
+[710691, 1945, 10, 22],
+[710743, 1945, 12, 13],
+[710832, 1946, 3, 12],
+[710966, 1946, 7, 24],
+[711012, 1946, 9, 8],
+[711018, 1946, 9, 14],
+[711204, 1947, 3, 19],
+[711285, 1947, 6, 8],
+[711452, 1947, 11, 22],
+[711531, 1948, 2, 9],
+[711710, 1948, 8, 6],
+[711907, 1949, 2, 19],
+[711981, 1949, 5, 4],
+[712109, 1949, 9, 9],
+[712117, 1949, 9, 17],
+[712240, 1950, 1, 18],
+[712371, 1950, 5, 29],
+[712436, 1950, 8, 2],
+[712474, 1950, 9, 9],
+[712656, 1951, 3, 10],
+[712754, 1951, 6, 16],
+[712845, 1951, 9, 15],
+[712946, 1951, 12, 25],
+[713102, 1952, 5, 29],
+[713299, 1952, 12, 12],
+[713498, 1953, 6, 29],
+[713561, 1953, 8, 31],
+[713656, 1953, 12, 4],
+[713845, 1954, 6, 11],
+[713907, 1954, 8, 12],
+[713910, 1954, 8, 15],
+[713974, 1954, 10, 18],
+[714017, 1954, 11, 30],
+[714132, 1955, 3, 25],
+[714175, 1955, 5, 7],
+[714311, 1955, 9, 20],
+[714416, 1956, 1, 3],
+[714511, 1956, 4, 7],
+[714581, 1956, 6, 16],
+[714693, 1956, 10, 6],
+[714732, 1956, 11, 14],
+[714875, 1957, 4, 6],
+[715048, 1957, 9, 26],
+[715090, 1957, 11, 7],
+[715127, 1957, 12, 14],
+[715220, 1958, 3, 17],
+[715368, 1958, 8, 12],
+[715415, 1958, 9, 28],
+[715419, 1958, 10, 2],
+[715450, 1958, 11, 2],
+[715633, 1959, 5, 4],
+[715682, 1959, 6, 22],
+[715712, 1959, 7, 22],
+[715750, 1959, 8, 29],
+[715896, 1960, 1, 22],
+[715957, 1960, 3, 23],
+[716119, 1960, 9, 1],
+[716260, 1961, 1, 20],
+[716378, 1961, 5, 18],
+[716400, 1961, 6, 9],
+[716487, 1961, 9, 4],
+[716575, 1961, 12, 1],
+[716676, 1962, 3, 12],
+[716682, 1962, 3, 18],
+[716801, 1962, 7, 15],
+[716978, 1963, 1, 8],
+[717008, 1963, 2, 7],
+[717030, 1963, 3, 1],
+[717120, 1963, 5, 30],
+[717179, 1963, 7, 28],
+[717215, 1963, 9, 2],
+[717394, 1964, 2, 28],
+[717495, 1964, 6, 8],
+[717507, 1964, 6, 20],
+[717559, 1964, 8, 11],
+[717598, 1964, 9, 19],
+[717680, 1964, 12, 10],
+[717859, 1965, 6, 7],
+[717953, 1965, 9, 9],
+[718070, 1966, 1, 4],
+[718210, 1966, 5, 24],
+[718265, 1966, 7, 18],
+[718353, 1966, 10, 14],
+[718450, 1967, 1, 19],
+[718506, 1967, 3, 16],
+[718529, 1967, 4, 8],
+[718703, 1967, 9, 29],
+[718900, 1968, 4, 13],
+[718986, 1968, 7, 8],
+[719029, 1968, 8, 20],
+[719228, 1969, 3, 7],
+[719362, 1969, 7, 19],
+[719558, 1970, 1, 31],
+[719580, 1970, 2, 22],
+[719623, 1970, 4, 6],
+[719732, 1970, 7, 24],
+[719795, 1970, 9, 25],
+[719824, 1970, 10, 24],
+[719955, 1971, 3, 4],
+[720091, 1971, 7, 18],
+[720225, 1971, 11, 29],
+[720282, 1972, 1, 25],
+[720299, 1972, 2, 11],
+[720395, 1972, 5, 17],
+[720402, 1972, 5, 24],
+[720570, 1972, 11, 8],
+[720684, 1973, 3, 2],
+[720756, 1973, 5, 13],
+[720941, 1973, 11, 14],
+[721122, 1974, 5, 14],
+[721154, 1974, 6, 15],
+[721283, 1974, 10, 22],
+[721438, 1975, 3, 26],
+[721616, 1975, 9, 20],
+[721745, 1976, 1, 27],
+[721792, 1976, 3, 14],
+[721829, 1976, 4, 20],
+[721984, 1976, 9, 22],
+[722045, 1976, 11, 22],
+[722143, 1977, 2, 28],
+[722288, 1977, 7, 23],
+[722371, 1977, 10, 14],
+[722561, 1978, 4, 22],
+[722700, 1978, 9, 8],
+[722722, 1978, 9, 30],
+[722866, 1979, 2, 21],
+[723044, 1979, 8, 18],
+[723054, 1979, 8, 28],
+[723084, 1979, 9, 27],
+[723148, 1979, 11, 30],
+[723334, 1980, 6, 3],
+[723509, 1980, 11, 25],
+[723652, 1981, 4, 17],
+[723832, 1981, 10, 14],
+[724028, 1982, 4, 28],
+[724042, 1982, 5, 12],
+[724178, 1982, 9, 25],
+[724320, 1983, 2, 14],
+[724438, 1983, 6, 12],
+[724596, 1983, 11, 17],
+[724693, 1984, 2, 22],
+[724742, 1984, 4, 11],
+[724865, 1984, 8, 12],
+[724912, 1984, 9, 28],
+[724926, 1984, 10, 12],
+[724930, 1984, 10, 16],
+[724938, 1984, 10, 24],
+[725062, 1985, 2, 25],
+[725067, 1985, 3, 2],
+[725242, 1985, 8, 24],
+[725265, 1985, 9, 16],
+[725385, 1986, 1, 14],
+[725555, 1986, 7, 3],
+[725615, 1986, 9, 1],
+[725747, 1987, 1, 11],
+[725754, 1987, 1, 18],
+[725932, 1987, 7, 15],
+[726014, 1987, 10, 5],
+[726138, 1988, 2, 6],
+[726288, 1988, 7, 5],
+[726390, 1988, 10, 15],
+[726574, 1989, 4, 17],
+[726719, 1989, 9, 9],
+[726802, 1989, 12, 1],
+[726953, 1990, 5, 1],
+[727099, 1990, 9, 24],
+[727157, 1990, 11, 21],
+[727250, 1991, 2, 22],
+[727394, 1991, 7, 16],
+[727581, 1992, 1, 19],
+[727729, 1992, 6, 15],
+[727926, 1992, 12, 29],
+[728032, 1993, 4, 14],
+[728082, 1993, 6, 3],
+[728210, 1993, 10, 9],
+[728274, 1993, 12, 12],
+[728344, 1994, 2, 20],
+[728540, 1994, 9, 4],
+[728546, 1994, 9, 10],
+[728624, 1994, 11, 27],
+[728629, 1994, 12, 2],
+[728647, 1994, 12, 20],
+[728649, 1994, 12, 22],
+[728671, 1995, 1, 13],
+[728859, 1995, 7, 20],
+[728967, 1995, 11, 5],
+[729141, 1996, 4, 27],
+[729278, 1996, 9, 11],
+[729461, 1997, 3, 13],
+[729539, 1997, 5, 30],
+[729563, 1997, 6, 23],
+[729634, 1997, 9, 2],
+[729786, 1998, 2, 1],
+[729882, 1998, 5, 8],
+[730022, 1998, 9, 25],
+[730115, 1998, 12, 27],
+[730184, 1999, 3, 6],
+[730383, 1999, 9, 21],
+[730469, 1999, 12, 16],
+[730554, 2000, 3, 10],
+[730745, 2000, 9, 17],
+[730891, 2001, 2, 10],
+[730995, 2001, 5, 25],
+[731002, 2001, 6, 1],
+[731050, 2001, 7, 19],
+[731154, 2001, 10, 31],
+[731299, 2002, 3, 25],
+[731348, 2002, 5, 13],
+[731541, 2002, 11, 22],
+[731692, 2003, 4, 22],
+[731779, 2003, 7, 18],
+[731800, 2003, 8, 8],
+[731978, 2004, 2, 2],
+[732056, 2004, 4, 20],
+[732099, 2004, 6, 2],
+[732108, 2004, 6, 11],
+[732296, 2004, 12, 16],
+[732435, 2005, 5, 4],
+[732612, 2005, 10, 28],
+[732654, 2005, 12, 9],
+[732841, 2006, 6, 14],
+[732965, 2006, 10, 16],
+[733043, 2007, 1, 2],
+[733206, 2007, 6, 14],
+[733349, 2007, 11, 4],
+[733459, 2008, 2, 22],
+[733620, 2008, 8, 1],
+[733661, 2008, 9, 11],
+[733798, 2009, 1, 26],
+[733832, 2009, 3, 1],
+[733851, 2009, 3, 20],
+[734010, 2009, 8, 26],
+[734202, 2010, 3, 6],
+[734298, 2010, 6, 10],
+[734317, 2010, 6, 29],
+[734516, 2011, 1, 14],
+[734665, 2011, 6, 12],
+[734857, 2011, 12, 21],
+[734884, 2012, 1, 17],
+[734939, 2012, 3, 12],
+[735073, 2012, 7, 24],
+[735241, 2013, 1, 8],
+[735419, 2013, 7, 5],
+[735489, 2013, 9, 13],
+[735604, 2014, 1, 6],
+[735750, 2014, 6, 1],
+[735839, 2014, 8, 29],
+[736006, 2015, 2, 12],
+[736040, 2015, 3, 18],
+[736132, 2015, 6, 18],
+[736176, 2015, 8, 1],
+[736181, 2015, 8, 6],
+[736354, 2016, 1, 26],
+[736482, 2016, 6, 2],
+[736485, 2016, 6, 5],
+[736522, 2016, 7, 12],
+[736523, 2016, 7, 13],
+[736549, 2016, 8, 8],
+[736603, 2016, 10, 1],
+[736641, 2016, 11, 8],
+[736647, 2016, 11, 14],
+[736688, 2016, 12, 25],
+[736765, 2017, 3, 12],
+[736914, 2017, 8, 8],
+[736932, 2017, 8, 26],
+[737066, 2018, 1, 7],
+[737113, 2018, 2, 23],
+[737233, 2018, 6, 23],
+[737382, 2018, 11, 19],
+[737557, 2019, 5, 13],
+[737586, 2019, 6, 11],
+[737700, 2019, 10, 3],
+[737724, 2019, 10, 27],
+[737735, 2019, 11, 7],
+[737736, 2019, 11, 8],
+[737810, 2020, 1, 21],
+[737885, 2020, 4, 5],
+[738021, 2020, 8, 19],
+[738116, 2020, 11, 22],
+[738306, 2021, 5, 31],
+[738374, 2021, 8, 7],
+[738521, 2022, 1, 1],
+[738546, 2022, 1, 26],
+[738739, 2022, 8, 7],
+[738904, 2023, 1, 19],
+[738965, 2023, 3, 21],
+[739009, 2023, 5, 4],
+[739127, 2023, 8, 30],
+[739243, 2023, 12, 24],
+[739401, 2024, 5, 30],
+[739573, 2024, 11, 18],
+[739581, 2024, 11, 26],
+[739611, 2024, 12, 26],
+[739684, 2025, 3, 9],
+[739755, 2025, 5, 19],
+[739896, 2025, 10, 7],
+[740083, 2026, 4, 12],
+[740134, 2026, 6, 2],
+[740317, 2026, 12, 2],
+[740396, 2027, 2, 19],
+[740536, 2027, 7, 9],
+[740576, 2027, 8, 18],
+[740650, 2027, 10, 31],
+[740796, 2028, 3, 25],
+[740850, 2028, 5, 18],
+[740965, 2028, 9, 10],
+[740999, 2028, 10, 14],
+[741100, 2029, 1, 23],
+[741125, 2029, 2, 17],
+[741266, 2029, 7, 8],
+[741434, 2029, 12, 23],
+[741541, 2030, 4, 9],
+[741615, 2030, 6, 22],
+[741666, 2030, 8, 12],
+[741863, 2031, 2, 25],
+[741880, 2031, 3, 14],
+[741987, 2031, 6, 29],
+[742020, 2031, 8, 1],
+[742143, 2031, 12, 2],
+[742233, 2032, 3, 1],
+[742359, 2032, 7, 5],
+[742518, 2032, 12, 11],
+[742590, 2033, 2, 21],
+[742761, 2033, 8, 11],
+[742953, 2034, 2, 19],
+[743092, 2034, 7, 8],
+[743279, 2035, 1, 11],
+[743302, 2035, 2, 3],
+[743467, 2035, 7, 18],
+[743515, 2035, 9, 4],
+[743552, 2035, 10, 11],
+[743661, 2036, 1, 28],
+[743812, 2036, 6, 27],
+[743891, 2036, 9, 14],
+[743997, 2036, 12, 29],
+[744108, 2037, 4, 19],
+[744155, 2037, 6, 5],
+[744320, 2037, 11, 17],
+[744520, 2038, 6, 5],
+[744598, 2038, 8, 22],
+[744695, 2038, 11, 27],
+[744854, 2039, 5, 5],
+[744904, 2039, 6, 24],
+[744923, 2039, 7, 13],
+[745072, 2039, 12, 9],
+[745085, 2039, 12, 22],
+[745171, 2040, 3, 17],
+[745371, 2040, 10, 3],
+[745539, 2041, 3, 20],
+[745585, 2041, 5, 5],
+[745678, 2041, 8, 6],
+[745856, 2042, 1, 31],
+[745915, 2042, 3, 31],
+[745964, 2042, 5, 19],
+[746020, 2042, 7, 14],
+[746148, 2042, 11, 19],
+[746202, 2043, 1, 12],
+[746343, 2043, 6, 2],
+[746483, 2043, 10, 20],
+[746608, 2044, 2, 22],
+[746699, 2044, 5, 23],
+[746844, 2044, 10, 15],
+[747028, 2045, 4, 17],
+[747035, 2045, 4, 24],
+[747174, 2045, 9, 10],
+[747256, 2045, 12, 1],
+[747428, 2046, 5, 22],
+[747510, 2046, 8, 12],
+[747701, 2047, 2, 19],
+[747703, 2047, 2, 21],
+[747766, 2047, 4, 25],
+[747940, 2047, 10, 16],
+[748093, 2048, 3, 17],
+[748225, 2048, 7, 27],
+[748280, 2048, 9, 20],
+[748293, 2048, 10, 3],
+[748467, 2049, 3, 26],
+[748641, 2049, 9, 16],
+[748698, 2049, 11, 12],
+[748827, 2050, 3, 21],
+[748870, 2050, 5, 3],
+[749041, 2050, 10, 21],
+[749130, 2051, 1, 18],
+[749283, 2051, 6, 20],
+[749328, 2051, 8, 4],
+[749486, 2052, 1, 9],
+[749633, 2052, 6, 4],
+[749791, 2052, 11, 9],
+[749810, 2052, 11, 28],
+[749834, 2052, 12, 22],
+[749884, 2053, 2, 10],
+[749993, 2053, 5, 30],
+[750002, 2053, 6, 8],
+[750093, 2053, 9, 7],
+[750275, 2054, 3, 8],
+[750399, 2054, 7, 10],
+[750550, 2054, 12, 8],
+[750663, 2055, 3, 31],
+[750856, 2055, 10, 10],
+[751008, 2056, 3, 10],
+[751118, 2056, 6, 28],
+[751134, 2056, 7, 14],
+[751193, 2056, 9, 11],
+[751268, 2056, 11, 25],
+[751440, 2057, 5, 16],
+[751530, 2057, 8, 14],
+[751534, 2057, 8, 18],
+[751637, 2057, 11, 29],
+[751652, 2057, 12, 14],
+[751669, 2057, 12, 31],
+[751692, 2058, 1, 23],
+[751780, 2058, 4, 21],
+[751803, 2058, 5, 14],
+[751976, 2058, 11, 3],
+[752056, 2059, 1, 22],
+[752177, 2059, 5, 23],
+[752253, 2059, 8, 7],
+[752256, 2059, 8, 10],
+[752276, 2059, 8, 30],
+[752345, 2059, 11, 7],
+[752465, 2060, 3, 6],
+[752487, 2060, 3, 28],
+[752578, 2060, 6, 27],
+[752626, 2060, 8, 14],
+[752778, 2061, 1, 13],
+[752870, 2061, 4, 15],
+[752964, 2061, 7, 18],
+[753007, 2061, 8, 30],
+[753070, 2061, 11, 1],
+[753114, 2061, 12, 15],
+[753264, 2062, 5, 14],
+[753379, 2062, 9, 6],
+[753495, 2062, 12, 31],
+[753523, 2063, 1, 28],
+[753593, 2063, 4, 8],
+[753628, 2063, 5, 13],
+[753810, 2063, 11, 11],
+[754001, 2064, 5, 20],
+[754199, 2064, 12, 4],
+[754248, 2065, 1, 22],
+[754302, 2065, 3, 17],
+[754312, 2065, 3, 27],
+[754503, 2065, 10, 4],
+[754566, 2065, 12, 6],
+[754748, 2066, 6, 6],
+[754750, 2066, 6, 8],
+[754774, 2066, 7, 2],
+[754862, 2066, 9, 28],
+[754986, 2067, 1, 30],
+[755042, 2067, 3, 27],
+[755082, 2067, 5, 6],
+[755250, 2067, 10, 21],
+[755437, 2068, 4, 25],
+[755602, 2068, 10, 7],
+[755692, 2069, 1, 5],
+[755825, 2069, 5, 18],
+[755928, 2069, 8, 29],
+[755971, 2069, 10, 11],
+[756112, 2070, 3, 1],
+[756152, 2070, 4, 10],
+[756331, 2070, 10, 6],
+[756504, 2071, 3, 28],
+[756593, 2071, 6, 25],
+[756751, 2071, 11, 30],
+[756755, 2071, 12, 4],
+[756759, 2071, 12, 8],
+[756902, 2072, 4, 29],
+[756945, 2072, 6, 11],
+[757006, 2072, 8, 11],
+[757052, 2072, 9, 26],
+[757135, 2072, 12, 18],
+[757312, 2073, 6, 13],
+[757314, 2073, 6, 15],
+[757466, 2073, 11, 14],
+[757612, 2074, 4, 9],
+[757704, 2074, 7, 10],
+[757834, 2074, 11, 17],
+[757889, 2075, 1, 11],
+[757921, 2075, 2, 12],
+[757925, 2075, 2, 16],
+[758045, 2075, 6, 16],
+[758065, 2075, 7, 6],
+[758263, 2076, 1, 20],
+[758402, 2076, 6, 7],
+[758530, 2076, 10, 13],
+[758615, 2077, 1, 6],
+[758674, 2077, 3, 6],
+[758761, 2077, 6, 1],
+[758853, 2077, 9, 1],
+[759002, 2078, 1, 28],
+[759004, 2078, 1, 30],
+[759135, 2078, 6, 10],
+[759156, 2078, 7, 1],
+[759248, 2078, 10, 1],
+[759412, 2079, 3, 14],
+[759515, 2079, 6, 25],
+[759636, 2079, 10, 24],
+[759736, 2080, 2, 1],
+[759912, 2080, 7, 26],
+[760080, 2081, 1, 10],
+[760143, 2081, 3, 14],
+[760250, 2081, 6, 29],
+[760282, 2081, 7, 31],
+[760473, 2082, 2, 7],
+[760586, 2082, 5, 31],
+[760767, 2082, 11, 28],
+[760836, 2083, 2, 5],
+[761013, 2083, 8, 1],
+[761053, 2083, 9, 10],
+[761134, 2083, 11, 30],
+[761154, 2083, 12, 20],
+[761252, 2084, 3, 27],
+[761423, 2084, 9, 14],
+[761586, 2085, 2, 24],
+[761780, 2085, 9, 6],
+[761979, 2086, 3, 24],
+[762126, 2086, 8, 18],
+[762282, 2087, 1, 21],
+[762427, 2087, 6, 15],
+[762506, 2087, 9, 2],
+[762564, 2087, 10, 30],
+[762597, 2087, 12, 2],
+[762731, 2088, 4, 14],
+[762823, 2088, 7, 15],
+[762905, 2088, 10, 5],
+[762996, 2089, 1, 4],
+[763115, 2089, 5, 3],
+[763244, 2089, 9, 9],
+[763308, 2089, 11, 12],
+[763364, 2090, 1, 7],
+[763458, 2090, 4, 11],
+[763539, 2090, 7, 1],
+[763596, 2090, 8, 27],
+[763634, 2090, 10, 4],
+[763683, 2090, 11, 22],
+[763854, 2091, 5, 12],
+[763871, 2091, 5, 29],
+[763946, 2091, 8, 12],
+[764027, 2091, 11, 1],
+[764041, 2091, 11, 15],
+[764102, 2092, 1, 15],
+[764172, 2092, 3, 25],
+[764182, 2092, 4, 4],
+[764250, 2092, 6, 11],
+[764348, 2092, 9, 17],
+[764401, 2092, 11, 9],
+[764542, 2093, 3, 30],
+[764543, 2093, 3, 31],
+[764571, 2093, 4, 28],
+[764572, 2093, 4, 29],
+[764604, 2093, 5, 31],
+[764631, 2093, 6, 27],
+[764680, 2093, 8, 15],
+[764690, 2093, 8, 25],
+[764757, 2093, 10, 31],
+[764857, 2094, 2, 8],
+[764993, 2094, 6, 24],
+[765101, 2094, 10, 10],
+[765258, 2095, 3, 16],
+[765307, 2095, 5, 4],
+[765469, 2095, 10, 13],
+[765629, 2096, 3, 21],
+[765725, 2096, 6, 25],
+[765742, 2096, 7, 12],
+[765752, 2096, 7, 22],
+[765888, 2096, 12, 5],
+[766068, 2097, 6, 3],
+[766139, 2097, 8, 13],
+[766211, 2097, 10, 24],
+[766233, 2097, 11, 15],
+[766346, 2098, 3, 8],
+[766418, 2098, 5, 19],
+[766528, 2098, 9, 6],
+[766588, 2098, 11, 5],
+[766755, 2099, 4, 21],
+[766774, 2099, 5, 10],
+[766863, 2099, 8, 7],
+[766943, 2099, 10, 26],
+[766953, 2099, 11, 5],
+[766989, 2099, 12, 11],
+[767145, 2100, 5, 16],
+[767151, 2100, 5, 22],
+[767217, 2100, 7, 27],
+[767286, 2100, 10, 4],
+[767305, 2100, 10, 23],
+[767429, 2101, 2, 24],
+[767508, 2101, 5, 14],
+[767579, 2101, 7, 24],
+[767751, 2102, 1, 12],
+[767919, 2102, 6, 29],
+[767958, 2102, 8, 7],
+[768090, 2102, 12, 17],
+[768251, 2103, 5, 27],
+[768405, 2103, 10, 28],
+[768543, 2104, 3, 14],
+[768714, 2104, 9, 1],
+[768857, 2105, 1, 22],
+[769001, 2105, 6, 15],
+[769084, 2105, 9, 6],
+[769104, 2105, 9, 26],
+[769167, 2105, 11, 28],
+[769340, 2106, 5, 20],
+[769452, 2106, 9, 9],
+[769529, 2106, 11, 25],
+[769718, 2107, 6, 2],
+[769741, 2107, 6, 25],
+[769779, 2107, 8, 2],
+[769847, 2107, 10, 9],
+[769961, 2108, 1, 31],
+[770048, 2108, 4, 27],
+[770098, 2108, 6, 16],
+[770295, 2108, 12, 30],
+[770378, 2109, 3, 23],
+[770461, 2109, 6, 14],
+[770653, 2109, 12, 23],
+[770822, 2110, 6, 10],
+[770919, 2110, 9, 15],
+[771047, 2111, 1, 21],
+[771208, 2111, 7, 1],
+[771319, 2111, 10, 20],
+[771446, 2112, 2, 24],
+[771574, 2112, 7, 1],
+[771742, 2112, 12, 16],
+[771765, 2113, 1, 8],
+[771808, 2113, 2, 20],
+[771904, 2113, 5, 27],
+[771934, 2113, 6, 26],
+[772040, 2113, 10, 10],
+[772058, 2113, 10, 28],
+[772212, 2114, 3, 31],
+[772261, 2114, 5, 19],
+[772349, 2114, 8, 15],
+[772472, 2114, 12, 16],
+[772578, 2115, 4, 1],
+[772617, 2115, 5, 10],
+[772741, 2115, 9, 11],
+[772761, 2115, 10, 1],
+[772854, 2116, 1, 2],
+[772951, 2116, 4, 8],
+[773117, 2116, 9, 21],
+[773266, 2117, 2, 17],
+[773299, 2117, 3, 22],
+[773388, 2117, 6, 19],
+[773507, 2117, 10, 16],
+[773575, 2117, 12, 23],
+[773750, 2118, 6, 16],
+[773946, 2118, 12, 29],
+[774040, 2119, 4, 2],
+[774195, 2119, 9, 4],
+[774342, 2120, 1, 29],
+[774509, 2120, 7, 14],
+[774603, 2120, 10, 16],
+[774624, 2120, 11, 6],
+[774815, 2121, 5, 16],
+[774984, 2121, 11, 1],
+[775029, 2121, 12, 16],
+[775164, 2122, 4, 30],
+[775191, 2122, 5, 27],
+[775349, 2122, 11, 1],
+[775526, 2123, 4, 27],
+[775673, 2123, 9, 21],
+[775789, 2124, 1, 15],
+[775873, 2124, 4, 8],
+[775884, 2124, 4, 19],
+[775885, 2124, 4, 20],
+[775964, 2124, 7, 8],
+[776076, 2124, 10, 28],
+[776096, 2124, 11, 17],
+[776107, 2124, 11, 28],
+[776110, 2124, 12, 1],
+[776228, 2125, 3, 29],
+[776292, 2125, 6, 1],
+[776420, 2125, 10, 7],
+[776511, 2126, 1, 6],
+[776548, 2126, 2, 12],
+[776648, 2126, 5, 23],
+[776733, 2126, 8, 16],
+[776741, 2126, 8, 24],
+[776810, 2126, 11, 1],
+[776915, 2127, 2, 14],
+[776982, 2127, 4, 22],
+[777049, 2127, 6, 28],
+[777104, 2127, 8, 22],
+[777209, 2127, 12, 5],
+[777227, 2127, 12, 23],
+[777295, 2128, 2, 29],
+[777384, 2128, 5, 28],
+[777555, 2128, 11, 15],
+[777731, 2129, 5, 10],
+[777847, 2129, 9, 3],
+[777898, 2129, 10, 24],
+[777926, 2129, 11, 21],
+[778017, 2130, 2, 20],
+[778063, 2130, 4, 7],
+[778233, 2130, 9, 24],
+[778361, 2131, 1, 30],
+[778452, 2131, 5, 1],
+[778555, 2131, 8, 12],
+[778733, 2132, 2, 6],
+[778823, 2132, 5, 6],
+[778888, 2132, 7, 10],
+[778945, 2132, 9, 5],
+[779049, 2132, 12, 18],
+[779062, 2132, 12, 31],
+[779157, 2133, 4, 5],
+[779356, 2133, 10, 21],
+[779520, 2134, 4, 3],
+[779676, 2134, 9, 6],
+[779768, 2134, 12, 7],
+[779918, 2135, 5, 6],
+[780004, 2135, 7, 31],
+[780161, 2136, 1, 4],
+[780329, 2136, 6, 20],
+[780496, 2136, 12, 4],
+[780530, 2137, 1, 7],
+[780706, 2137, 7, 2],
+[780750, 2137, 8, 15],
+[780764, 2137, 8, 29],
+[780846, 2137, 11, 19],
+[781025, 2138, 5, 17],
+[781091, 2138, 7, 22],
+[781096, 2138, 7, 27],
+[781198, 2138, 11, 6],
+[781226, 2138, 12, 4],
+[781348, 2139, 4, 5],
+[781547, 2139, 10, 21],
+[781562, 2139, 11, 5],
+[781597, 2139, 12, 10],
+[781764, 2140, 5, 25],
+[781808, 2140, 7, 8],
+[781941, 2140, 11, 18],
+[782103, 2141, 4, 29],
+[782239, 2141, 9, 12],
+[782396, 2142, 2, 16],
+[782579, 2142, 8, 18],
+[782698, 2142, 12, 15],
+[782719, 2143, 1, 5],
+[782860, 2143, 5, 26],
+[782990, 2143, 10, 3],
+[783027, 2143, 11, 9],
+[783202, 2144, 5, 2],
+[783259, 2144, 6, 28],
+[783319, 2144, 8, 27],
+[783489, 2145, 2, 13],
+[783608, 2145, 6, 12],
+[783679, 2145, 8, 22],
+[783741, 2145, 10, 23],
+[783936, 2146, 5, 6],
+[784029, 2146, 8, 7],
+[784033, 2146, 8, 11],
+[784135, 2146, 11, 21],
+[784181, 2147, 1, 6],
+[784340, 2147, 6, 14],
+[784420, 2147, 9, 2],
+[784516, 2147, 12, 7],
+[784518, 2147, 12, 9],
+[784632, 2148, 4, 1],
+[784783, 2148, 8, 30],
+[784787, 2148, 9, 3],
+[784968, 2149, 3, 3],
+[785067, 2149, 6, 10],
+[785243, 2149, 12, 3],
+[785399, 2150, 5, 8],
+[785531, 2150, 9, 17],
+[785696, 2151, 3, 1],
+[785840, 2151, 7, 23],
+[786033, 2152, 2, 1],
+[786098, 2152, 4, 6],
+[786184, 2152, 7, 1],
+[786202, 2152, 7, 19],
+[786385, 2153, 1, 18],
+[786463, 2153, 4, 6],
+[786577, 2153, 7, 29],
+[786697, 2153, 11, 26],
+[786848, 2154, 4, 26],
+[787023, 2154, 10, 18],
+[787153, 2155, 2, 25],
+[787166, 2155, 3, 10],
+[787295, 2155, 7, 17],
+[787421, 2155, 11, 20],
+[787448, 2155, 12, 17],
+[787615, 2156, 6, 1],
+[787759, 2156, 10, 23],
+[787800, 2156, 12, 3],
+[787846, 2157, 1, 18],
+[788022, 2157, 7, 13],
+[788063, 2157, 8, 23],
+[788261, 2158, 3, 9],
+[788277, 2158, 3, 25],
+[788425, 2158, 8, 20],
+[788602, 2159, 2, 13],
+[788734, 2159, 6, 25],
+[788872, 2159, 11, 10],
+[788903, 2159, 12, 11],
+[789025, 2160, 4, 11],
+[789094, 2160, 6, 19],
+[789215, 2160, 10, 18],
+[789320, 2161, 1, 31],
+[789433, 2161, 5, 24],
+[789504, 2161, 8, 3],
+[789681, 2162, 1, 27],
+[789685, 2162, 1, 31],
+[789786, 2162, 5, 12],
+[789901, 2162, 9, 4],
+[789981, 2162, 11, 23],
+[790123, 2163, 4, 14],
+[790198, 2163, 6, 28],
+[790237, 2163, 8, 6],
+[790353, 2163, 11, 30],
+[790474, 2164, 3, 30],
+[790508, 2164, 5, 3],
+[790589, 2164, 7, 23],
+[790707, 2164, 11, 18],
+[790865, 2165, 4, 25],
+[790984, 2165, 8, 22],
+[791138, 2166, 1, 23],
+[791308, 2166, 7, 12],
+[791493, 2167, 1, 13],
+[791518, 2167, 2, 7],
+[791636, 2167, 6, 5],
+[791666, 2167, 7, 5],
+[791737, 2167, 9, 14],
+[791898, 2168, 2, 22],
+[792069, 2168, 8, 11],
+[792234, 2169, 1, 23],
+[792259, 2169, 2, 17],
+[792263, 2169, 2, 21],
+[792317, 2169, 4, 16],
+[792492, 2169, 10, 8],
+[792658, 2170, 3, 23],
+[792681, 2170, 4, 15],
+[792780, 2170, 7, 23],
+[792890, 2170, 11, 10],
+[792965, 2171, 1, 24],
+[793165, 2171, 8, 12],
+[793349, 2172, 2, 12],
+[793351, 2172, 2, 14],
+[793531, 2172, 8, 12],
+[793577, 2172, 9, 27],
+[793749, 2173, 3, 18],
+[793867, 2173, 7, 14],
+[793909, 2173, 8, 25],
+[794082, 2174, 2, 14],
+[794143, 2174, 4, 16],
+[794207, 2174, 6, 19],
+[794296, 2174, 9, 16],
+[794362, 2174, 11, 21],
+[794414, 2175, 1, 12],
+[794552, 2175, 5, 30],
+[794571, 2175, 6, 18],
+[794672, 2175, 9, 27],
+[794797, 2176, 1, 30],
+[794930, 2176, 6, 11],
+[795127, 2176, 12, 25],
+[795251, 2177, 4, 28],
+[795350, 2177, 8, 5],
+[795463, 2177, 11, 26],
+[795477, 2177, 12, 10],
+[795558, 2178, 3, 1],
+[795609, 2178, 4, 21],
+[795759, 2178, 9, 18],
+[795933, 2179, 3, 11],
+[795938, 2179, 3, 16],
+[796130, 2179, 9, 24],
+[796288, 2180, 2, 29],
+[796455, 2180, 8, 14],
+[796598, 2181, 1, 4],
+[796742, 2181, 5, 28],
+[796905, 2181, 11, 7],
+[796925, 2181, 11, 27],
+[796970, 2182, 1, 11],
+[797022, 2182, 3, 4],
+[797175, 2182, 8, 4],
+[797237, 2182, 10, 5],
+[797282, 2182, 11, 19],
+[797343, 2183, 1, 19],
+[797394, 2183, 3, 11],
+[797564, 2183, 8, 28],
+[797701, 2184, 1, 12],
+[797714, 2184, 1, 25],
+[797744, 2184, 2, 24],
+[797811, 2184, 5, 1],
+[797972, 2184, 10, 9],
+[798049, 2184, 12, 25],
+[798182, 2185, 5, 7],
+[798312, 2185, 9, 14],
+[798337, 2185, 10, 9],
+[798396, 2185, 12, 7],
+[798511, 2186, 4, 1],
+[798585, 2186, 6, 14],
+[798705, 2186, 10, 12],
+[798831, 2187, 2, 15],
+[799003, 2187, 8, 6],
+[799126, 2187, 12, 7],
+[799323, 2188, 6, 21],
+[799359, 2188, 7, 27],
+[799540, 2189, 1, 24],
+[799706, 2189, 7, 9],
+[799762, 2189, 9, 3],
+[799773, 2189, 9, 14],
+[799951, 2190, 3, 11],
+[799991, 2190, 4, 20],
+[800085, 2190, 7, 23],
+[800121, 2190, 8, 28],
+[800259, 2191, 1, 13],
+[800364, 2191, 4, 28],
+[800549, 2191, 10, 30],
+[800728, 2192, 4, 26],
+[800892, 2192, 10, 7],
+[800938, 2192, 11, 22],
+[801129, 2193, 6, 1],
+[801232, 2193, 9, 12],
+[801265, 2193, 10, 15],
+[801447, 2194, 4, 15],
+[801532, 2194, 7, 9],
+[801646, 2194, 10, 31],
+[801705, 2194, 12, 29],
+[801892, 2195, 7, 4],
+[801973, 2195, 9, 23],
+[801995, 2195, 10, 15],
+[802139, 2196, 3, 7],
+[802243, 2196, 6, 19],
+[802406, 2196, 11, 29],
+[802480, 2197, 2, 11],
+[802559, 2197, 5, 1],
+[802655, 2197, 8, 5],
+[802735, 2197, 10, 24],
+[802830, 2198, 1, 27],
+[802833, 2198, 1, 30],
+[802839, 2198, 2, 5],
+[803037, 2198, 8, 22],
+[803139, 2198, 12, 2],
+[803207, 2199, 2, 8],
+[803341, 2199, 6, 22],
+[803479, 2199, 11, 7],
+[803679, 2200, 5, 26],
+[803737, 2200, 7, 23],
+[803775, 2200, 8, 30],
+[803914, 2201, 1, 16],
+[803976, 2201, 3, 19],
+[804027, 2201, 5, 9],
+[804144, 2201, 9, 3],
+[804257, 2201, 12, 25],
+[804373, 2202, 4, 20],
+[804402, 2202, 5, 19],
+[804482, 2202, 8, 7],
+[804603, 2202, 12, 6],
+[804736, 2203, 4, 18],
+[804747, 2203, 4, 29],
+[804881, 2203, 9, 10],
+[805059, 2204, 3, 6],
+[805077, 2204, 3, 24],
+[805146, 2204, 6, 1],
+[805250, 2204, 9, 13],
+[805268, 2204, 10, 1],
+[805442, 2205, 3, 24],
+[805592, 2205, 8, 21],
+[805702, 2205, 12, 9],
+[805748, 2206, 1, 24],
+[805848, 2206, 5, 4],
+[805868, 2206, 5, 24],
+[806052, 2206, 11, 24],
+[806095, 2207, 1, 6],
+[806200, 2207, 4, 21],
+[806321, 2207, 8, 20],
+[806503, 2208, 2, 18],
+[806673, 2208, 8, 6],
+[806686, 2208, 8, 19],
+[806759, 2208, 10, 31],
+[806785, 2208, 11, 26],
+[806846, 2209, 1, 26],
+[806914, 2209, 4, 4],
+[807038, 2209, 8, 6],
+[807226, 2210, 2, 10],
+[807365, 2210, 6, 29],
+[807460, 2210, 10, 2],
+[807474, 2210, 10, 16],
+[807584, 2211, 2, 3],
+[807756, 2211, 7, 25],
+[807825, 2211, 10, 2],
+[807913, 2211, 12, 29],
+[808060, 2212, 5, 24],
+[808223, 2212, 11, 3],
+[808282, 2213, 1, 1],
+[808412, 2213, 5, 11],
+[808437, 2213, 6, 5],
+[808532, 2213, 9, 8],
+[808560, 2213, 10, 6],
+[808658, 2214, 1, 12],
+[808742, 2214, 4, 6],
+[808860, 2214, 8, 2],
+[808939, 2214, 10, 20],
+[809027, 2215, 1, 16],
+[809192, 2215, 6, 30],
+[809354, 2215, 12, 9],
+[809474, 2216, 4, 7],
+[809525, 2216, 5, 28],
+[809649, 2216, 9, 29],
+[809757, 2217, 1, 15],
+[809780, 2217, 2, 7],
+[809857, 2217, 4, 25],
+[809958, 2217, 8, 4],
+[810126, 2218, 1, 19],
+[810162, 2218, 2, 24],
+[810188, 2218, 3, 22],
+[810269, 2218, 6, 11],
+[810378, 2218, 9, 28],
+[810422, 2218, 11, 11],
+[810508, 2219, 2, 5],
+[810540, 2219, 3, 9],
+[810707, 2219, 8, 23],
+[810761, 2219, 10, 16],
+[810888, 2220, 2, 20],
+[811066, 2220, 8, 16],
+[811178, 2220, 12, 6],
+[811205, 2221, 1, 2],
+[811391, 2221, 7, 7],
+[811533, 2221, 11, 26],
+[811691, 2222, 5, 3],
+[811775, 2222, 7, 26],
+[811895, 2222, 11, 23],
+[812019, 2223, 3, 27],
+[812144, 2223, 7, 30],
+[812274, 2223, 12, 7],
+[812275, 2223, 12, 8],
+[812406, 2224, 4, 17],
+[812554, 2224, 9, 12],
+[812721, 2225, 2, 26],
+[812897, 2225, 8, 21],
+[813053, 2226, 1, 24],
+[813252, 2226, 8, 11],
+[813360, 2226, 11, 27],
+[813385, 2226, 12, 22],
+[813529, 2227, 5, 15],
+[813548, 2227, 6, 3],
+[813566, 2227, 6, 21],
+[813693, 2227, 10, 26],
+[813808, 2228, 2, 18],
+[813818, 2228, 2, 28],
+[813879, 2228, 4, 29],
+[813972, 2228, 7, 31],
+[814080, 2228, 11, 16],
+[814132, 2229, 1, 7],
+[814248, 2229, 5, 3],
+[814268, 2229, 5, 23],
+[814331, 2229, 7, 25],
+[814451, 2229, 11, 22],
+[814455, 2229, 11, 26],
+[814592, 2230, 4, 12],
+[814713, 2230, 8, 11],
+[814788, 2230, 10, 25],
+[814899, 2231, 2, 13],
+[814978, 2231, 5, 3],
+[815028, 2231, 6, 22],
+[815083, 2231, 8, 16],
+[815151, 2231, 10, 23],
+[815248, 2232, 1, 28],
+[815333, 2232, 4, 22],
+[815429, 2232, 7, 27],
+[815609, 2233, 1, 23],
+[815772, 2233, 7, 5],
+[815878, 2233, 10, 19],
+[815911, 2233, 11, 21],
+[815942, 2233, 12, 22],
+[816116, 2234, 6, 14],
+[816195, 2234, 9, 1],
+[816218, 2234, 9, 24],
+[816318, 2235, 1, 2],
+[816511, 2235, 7, 14],
+[816521, 2235, 7, 24],
+[816536, 2235, 8, 8],
+[816605, 2235, 10, 16],
+[816631, 2235, 11, 11],
+[816702, 2236, 1, 21],
+[816900, 2236, 8, 6],
+[817062, 2237, 1, 15],
+[817223, 2237, 6, 25],
+[817385, 2237, 12, 4],
+[817504, 2238, 4, 2],
+[817532, 2238, 4, 30],
+[817675, 2238, 9, 20],
+[817689, 2238, 10, 4],
+[817854, 2239, 3, 18],
+[818017, 2239, 8, 28],
+[818153, 2240, 1, 11],
+[818255, 2240, 4, 22],
+[818416, 2240, 9, 30],
+[818607, 2241, 4, 9],
+[818781, 2241, 9, 30],
+[818900, 2242, 1, 27],
+[818975, 2242, 4, 12],
+[819127, 2242, 9, 11],
+[819130, 2242, 9, 14],
+[819171, 2242, 10, 25],
+[819280, 2243, 2, 11],
+[819333, 2243, 4, 5],
+[819452, 2243, 8, 2],
+[819571, 2243, 11, 29],
+[819678, 2244, 3, 15],
+[819702, 2244, 4, 8],
+[819799, 2244, 7, 14],
+[819937, 2244, 11, 29],
+[820005, 2245, 2, 5],
+[820148, 2245, 6, 28],
+[820299, 2245, 11, 26],
+[820337, 2246, 1, 3],
+[820379, 2246, 2, 14],
+[820441, 2246, 4, 17],
+[820531, 2246, 7, 16],
+[820565, 2246, 8, 19],
+[820736, 2247, 2, 6],
+[820895, 2247, 7, 15],
+[820999, 2247, 10, 27],
+[821186, 2248, 5, 1],
+[821205, 2248, 5, 20],
+[821217, 2248, 6, 1],
+[821358, 2248, 10, 20],
+[821393, 2248, 11, 24],
+[821532, 2249, 4, 12],
+[821568, 2249, 5, 18],
+[821735, 2249, 11, 1],
+[821867, 2250, 3, 13],
+[821884, 2250, 3, 30]]
diff --git a/go/mysql/datetime/testdata/year_to_daynr.json b/go/mysql/datetime/testdata/year_to_daynr.json
new file mode 100644
index 00000000000..43914806d21
--- /dev/null
+++ b/go/mysql/datetime/testdata/year_to_daynr.json
@@ -0,0 +1 @@
+[1, 366, 731, 1096, 1461, 1827, 2192, 2557, 2922, 3288, 3653, 4018, 4383, 4749, 5114, 5479, 5844, 6210, 6575, 6940, 7305, 7671, 8036, 8401, 8766, 9132, 9497, 9862, 10227, 10593, 10958, 11323, 11688, 12054, 12419, 12784, 13149, 13515, 13880, 14245, 14610, 14976, 15341, 15706, 16071, 16437, 16802, 17167, 17532, 17898, 18263, 18628, 18993, 19359, 19724, 20089, 20454, 20820, 21185, 21550, 21915, 22281, 22646, 23011, 23376, 23742, 24107, 24472, 24837, 25203, 25568, 25933, 26298, 26664, 27029, 27394, 27759, 28125, 28490, 28855, 29220, 29586, 29951, 30316, 30681, 31047, 31412, 31777, 32142, 32508, 32873, 33238, 33603, 33969, 34334, 34699, 35064, 35430, 35795, 36160, 36525, 36890, 37255, 37620, 37985, 38351, 38716, 39081, 39446, 39812, 40177, 40542, 40907, 41273, 41638, 42003, 42368, 42734, 43099, 43464, 43829, 44195, 44560, 44925, 45290, 45656, 46021, 46386, 46751, 47117, 47482, 47847, 48212, 48578, 48943, 49308, 49673, 50039, 50404, 50769, 51134, 51500, 51865, 52230, 52595, 52961, 53326, 53691, 54056, 54422, 54787, 55152, 55517, 55883, 56248, 56613, 56978, 57344, 57709, 58074, 58439, 58805, 59170, 59535, 59900, 60266, 60631, 60996, 61361, 61727, 62092, 62457, 62822, 63188, 63553, 63918, 64283, 64649, 65014, 65379, 65744, 66110, 66475, 66840, 67205, 67571, 67936, 68301, 68666, 69032, 69397, 69762, 70127, 70493, 70858, 71223, 71588, 71954, 72319, 72684, 73049, 73414, 73779, 74144, 74509, 74875, 75240, 75605, 75970, 76336, 76701, 77066, 77431, 77797, 78162, 78527, 78892, 79258, 79623, 79988, 80353, 80719, 81084, 81449, 81814, 82180, 82545, 82910, 83275, 83641, 84006, 84371, 84736, 85102, 85467, 85832, 86197, 86563, 86928, 87293, 87658, 88024, 88389, 88754, 89119, 89485, 89850, 90215, 90580, 90946, 91311, 91676, 92041, 92407, 92772, 93137, 93502, 93868, 94233, 94598, 94963, 95329, 95694, 96059, 96424, 96790, 97155, 97520, 97885, 98251, 98616, 98981, 99346, 99712, 100077, 100442, 100807, 101173, 101538, 101903, 102268, 102634, 102999, 103364, 103729, 104095, 104460, 104825, 
105190, 105556, 105921, 106286, 106651, 107017, 107382, 107747, 108112, 108478, 108843, 109208, 109573, 109938, 110303, 110668, 111033, 111399, 111764, 112129, 112494, 112860, 113225, 113590, 113955, 114321, 114686, 115051, 115416, 115782, 116147, 116512, 116877, 117243, 117608, 117973, 118338, 118704, 119069, 119434, 119799, 120165, 120530, 120895, 121260, 121626, 121991, 122356, 122721, 123087, 123452, 123817, 124182, 124548, 124913, 125278, 125643, 126009, 126374, 126739, 127104, 127470, 127835, 128200, 128565, 128931, 129296, 129661, 130026, 130392, 130757, 131122, 131487, 131853, 132218, 132583, 132948, 133314, 133679, 134044, 134409, 134775, 135140, 135505, 135870, 136236, 136601, 136966, 137331, 137697, 138062, 138427, 138792, 139158, 139523, 139888, 140253, 140619, 140984, 141349, 141714, 142080, 142445, 142810, 143175, 143541, 143906, 144271, 144636, 145002, 145367, 145732, 146097, 146463, 146828, 147193, 147558, 147924, 148289, 148654, 149019, 149385, 149750, 150115, 150480, 150846, 151211, 151576, 151941, 152307, 152672, 153037, 153402, 153768, 154133, 154498, 154863, 155229, 155594, 155959, 156324, 156690, 157055, 157420, 157785, 158151, 158516, 158881, 159246, 159612, 159977, 160342, 160707, 161073, 161438, 161803, 162168, 162534, 162899, 163264, 163629, 163995, 164360, 164725, 165090, 165456, 165821, 166186, 166551, 166917, 167282, 167647, 168012, 168378, 168743, 169108, 169473, 169839, 170204, 170569, 170934, 171300, 171665, 172030, 172395, 172761, 173126, 173491, 173856, 174222, 174587, 174952, 175317, 175683, 176048, 176413, 176778, 177144, 177509, 177874, 178239, 178605, 178970, 179335, 179700, 180066, 180431, 180796, 181161, 181527, 181892, 182257, 182622, 182987, 183352, 183717, 184082, 184448, 184813, 185178, 185543, 185909, 186274, 186639, 187004, 187370, 187735, 188100, 188465, 188831, 189196, 189561, 189926, 190292, 190657, 191022, 191387, 191753, 192118, 192483, 192848, 193214, 193579, 193944, 194309, 194675, 195040, 195405, 195770, 196136, 
196501, 196866, 197231, 197597, 197962, 198327, 198692, 199058, 199423, 199788, 200153, 200519, 200884, 201249, 201614, 201980, 202345, 202710, 203075, 203441, 203806, 204171, 204536, 204902, 205267, 205632, 205997, 206363, 206728, 207093, 207458, 207824, 208189, 208554, 208919, 209285, 209650, 210015, 210380, 210746, 211111, 211476, 211841, 212207, 212572, 212937, 213302, 213668, 214033, 214398, 214763, 215129, 215494, 215859, 216224, 216590, 216955, 217320, 217685, 218051, 218416, 218781, 219146, 219511, 219876, 220241, 220606, 220972, 221337, 221702, 222067, 222433, 222798, 223163, 223528, 223894, 224259, 224624, 224989, 225355, 225720, 226085, 226450, 226816, 227181, 227546, 227911, 228277, 228642, 229007, 229372, 229738, 230103, 230468, 230833, 231199, 231564, 231929, 232294, 232660, 233025, 233390, 233755, 234121, 234486, 234851, 235216, 235582, 235947, 236312, 236677, 237043, 237408, 237773, 238138, 238504, 238869, 239234, 239599, 239965, 240330, 240695, 241060, 241426, 241791, 242156, 242521, 242887, 243252, 243617, 243982, 244348, 244713, 245078, 245443, 245809, 246174, 246539, 246904, 247270, 247635, 248000, 248365, 248731, 249096, 249461, 249826, 250192, 250557, 250922, 251287, 251653, 252018, 252383, 252748, 253114, 253479, 253844, 254209, 254575, 254940, 255305, 255670, 256035, 256400, 256765, 257130, 257496, 257861, 258226, 258591, 258957, 259322, 259687, 260052, 260418, 260783, 261148, 261513, 261879, 262244, 262609, 262974, 263340, 263705, 264070, 264435, 264801, 265166, 265531, 265896, 266262, 266627, 266992, 267357, 267723, 268088, 268453, 268818, 269184, 269549, 269914, 270279, 270645, 271010, 271375, 271740, 272106, 272471, 272836, 273201, 273567, 273932, 274297, 274662, 275028, 275393, 275758, 276123, 276489, 276854, 277219, 277584, 277950, 278315, 278680, 279045, 279411, 279776, 280141, 280506, 280872, 281237, 281602, 281967, 282333, 282698, 283063, 283428, 283794, 284159, 284524, 284889, 285255, 285620, 285985, 286350, 286716, 287081, 287446, 
287811, 288177, 288542, 288907, 289272, 289638, 290003, 290368, 290733, 291099, 291464, 291829, 292194, 292560, 292925, 293290, 293655, 294021, 294386, 294751, 295116, 295482, 295847, 296212, 296577, 296943, 297308, 297673, 298038, 298404, 298769, 299134, 299499, 299865, 300230, 300595, 300960, 301326, 301691, 302056, 302421, 302787, 303152, 303517, 303882, 304248, 304613, 304978, 305343, 305709, 306074, 306439, 306804, 307170, 307535, 307900, 308265, 308631, 308996, 309361, 309726, 310092, 310457, 310822, 311187, 311553, 311918, 312283, 312648, 313014, 313379, 313744, 314109, 314475, 314840, 315205, 315570, 315936, 316301, 316666, 317031, 317397, 317762, 318127, 318492, 318858, 319223, 319588, 319953, 320319, 320684, 321049, 321414, 321780, 322145, 322510, 322875, 323241, 323606, 323971, 324336, 324702, 325067, 325432, 325797, 326163, 326528, 326893, 327258, 327624, 327989, 328354, 328719, 329084, 329449, 329814, 330179, 330545, 330910, 331275, 331640, 332006, 332371, 332736, 333101, 333467, 333832, 334197, 334562, 334928, 335293, 335658, 336023, 336389, 336754, 337119, 337484, 337850, 338215, 338580, 338945, 339311, 339676, 340041, 340406, 340772, 341137, 341502, 341867, 342233, 342598, 342963, 343328, 343694, 344059, 344424, 344789, 345155, 345520, 345885, 346250, 346616, 346981, 347346, 347711, 348077, 348442, 348807, 349172, 349538, 349903, 350268, 350633, 350999, 351364, 351729, 352094, 352460, 352825, 353190, 353555, 353921, 354286, 354651, 355016, 355382, 355747, 356112, 356477, 356843, 357208, 357573, 357938, 358304, 358669, 359034, 359399, 359765, 360130, 360495, 360860, 361226, 361591, 361956, 362321, 362687, 363052, 363417, 363782, 364148, 364513, 364878, 365243, 365608, 365973, 366338, 366703, 367069, 367434, 367799, 368164, 368530, 368895, 369260, 369625, 369991, 370356, 370721, 371086, 371452, 371817, 372182, 372547, 372913, 373278, 373643, 374008, 374374, 374739, 375104, 375469, 375835, 376200, 376565, 376930, 377296, 377661, 378026, 378391, 378757, 
379122, 379487, 379852, 380218, 380583, 380948, 381313, 381679, 382044, 382409, 382774, 383140, 383505, 383870, 384235, 384601, 384966, 385331, 385696, 386062, 386427, 386792, 387157, 387523, 387888, 388253, 388618, 388984, 389349, 389714, 390079, 390445, 390810, 391175, 391540, 391906, 392271, 392636, 393001, 393367, 393732, 394097, 394462, 394828, 395193, 395558, 395923, 396289, 396654, 397019, 397384, 397750, 398115, 398480, 398845, 399211, 399576, 399941, 400306, 400672, 401037, 401402, 401767, 402132, 402497, 402862, 403227, 403593, 403958, 404323, 404688, 405054, 405419, 405784, 406149, 406515, 406880, 407245, 407610, 407976, 408341, 408706, 409071, 409437, 409802, 410167, 410532, 410898, 411263, 411628, 411993, 412359, 412724, 413089, 413454, 413820, 414185, 414550, 414915, 415281, 415646, 416011, 416376, 416742, 417107, 417472, 417837, 418203, 418568, 418933, 419298, 419664, 420029, 420394, 420759, 421125, 421490, 421855, 422220, 422586, 422951, 423316, 423681, 424047, 424412, 424777, 425142, 425508, 425873, 426238, 426603, 426969, 427334, 427699, 428064, 428430, 428795, 429160, 429525, 429891, 430256, 430621, 430986, 431352, 431717, 432082, 432447, 432813, 433178, 433543, 433908, 434274, 434639, 435004, 435369, 435735, 436100, 436465, 436830, 437196, 437561, 437926, 438291, 438657, 439022, 439387, 439752, 440118, 440483, 440848, 441213, 441579, 441944, 442309, 442674, 443040, 443405, 443770, 444135, 444501, 444866, 445231, 445596, 445962, 446327, 446692, 447057, 447423, 447788, 448153, 448518, 448884, 449249, 449614, 449979, 450345, 450710, 451075, 451440, 451806, 452171, 452536, 452901, 453267, 453632, 453997, 454362, 454728, 455093, 455458, 455823, 456189, 456554, 456919, 457284, 457650, 458015, 458380, 458745, 459111, 459476, 459841, 460206, 460572, 460937, 461302, 461667, 462033, 462398, 462763, 463128, 463494, 463859, 464224, 464589, 464955, 465320, 465685, 466050, 466416, 466781, 467146, 467511, 467877, 468242, 468607, 468972, 469338, 469703, 470068, 
470433, 470799, 471164, 471529, 471894, 472260, 472625, 472990, 473355, 473721, 474086, 474451, 474816, 475181, 475546, 475911, 476276, 476642, 477007, 477372, 477737, 478103, 478468, 478833, 479198, 479564, 479929, 480294, 480659, 481025, 481390, 481755, 482120, 482486, 482851, 483216, 483581, 483947, 484312, 484677, 485042, 485408, 485773, 486138, 486503, 486869, 487234, 487599, 487964, 488330, 488695, 489060, 489425, 489791, 490156, 490521, 490886, 491252, 491617, 491982, 492347, 492713, 493078, 493443, 493808, 494174, 494539, 494904, 495269, 495635, 496000, 496365, 496730, 497096, 497461, 497826, 498191, 498557, 498922, 499287, 499652, 500018, 500383, 500748, 501113, 501479, 501844, 502209, 502574, 502940, 503305, 503670, 504035, 504401, 504766, 505131, 505496, 505862, 506227, 506592, 506957, 507323, 507688, 508053, 508418, 508784, 509149, 509514, 509879, 510245, 510610, 510975, 511340, 511705, 512070, 512435, 512800, 513166, 513531, 513896, 514261, 514627, 514992, 515357, 515722, 516088, 516453, 516818, 517183, 517549, 517914, 518279, 518644, 519010, 519375, 519740, 520105, 520471, 520836, 521201, 521566, 521932, 522297, 522662, 523027, 523393, 523758, 524123, 524488, 524854, 525219, 525584, 525949, 526315, 526680, 527045, 527410, 527776, 528141, 528506, 528871, 529237, 529602, 529967, 530332, 530698, 531063, 531428, 531793, 532159, 532524, 532889, 533254, 533620, 533985, 534350, 534715, 535081, 535446, 535811, 536176, 536542, 536907, 537272, 537637, 538003, 538368, 538733, 539098, 539464, 539829, 540194, 540559, 540925, 541290, 541655, 542020, 542386, 542751, 543116, 543481, 543847, 544212, 544577, 544942, 545308, 545673, 546038, 546403, 546769, 547134, 547499, 547864, 548229, 548594, 548959, 549324, 549690, 550055, 550420, 550785, 551151, 551516, 551881, 552246, 552612, 552977, 553342, 553707, 554073, 554438, 554803, 555168, 555534, 555899, 556264, 556629, 556995, 557360, 557725, 558090, 558456, 558821, 559186, 559551, 559917, 560282, 560647, 561012, 561378, 
561743, 562108, 562473, 562839, 563204, 563569, 563934, 564300, 564665, 565030, 565395, 565761, 566126, 566491, 566856, 567222, 567587, 567952, 568317, 568683, 569048, 569413, 569778, 570144, 570509, 570874, 571239, 571605, 571970, 572335, 572700, 573066, 573431, 573796, 574161, 574527, 574892, 575257, 575622, 575988, 576353, 576718, 577083, 577449, 577814, 578179, 578544, 578910, 579275, 579640, 580005, 580371, 580736, 581101, 581466, 581832, 582197, 582562, 582927, 583293, 583658, 584023, 584388, 584754, 585119, 585484, 585849, 586215, 586580, 586945, 587310, 587676, 588041, 588406, 588771, 589137, 589502, 589867, 590232, 590598, 590963, 591328, 591693, 592059, 592424, 592789, 593154, 593520, 593885, 594250, 594615, 594981, 595346, 595711, 596076, 596442, 596807, 597172, 597537, 597903, 598268, 598633, 598998, 599364, 599729, 600094, 600459, 600825, 601190, 601555, 601920, 602286, 602651, 603016, 603381, 603747, 604112, 604477, 604842, 605208, 605573, 605938, 606303, 606669, 607034, 607399, 607764, 608130, 608495, 608860, 609225, 609591, 609956, 610321, 610686, 611052, 611417, 611782, 612147, 612513, 612878, 613243, 613608, 613974, 614339, 614704, 615069, 615435, 615800, 616165, 616530, 616896, 617261, 617626, 617991, 618357, 618722, 619087, 619452, 619818, 620183, 620548, 620913, 621278, 621643, 622008, 622373, 622739, 623104, 623469, 623834, 624200, 624565, 624930, 625295, 625661, 626026, 626391, 626756, 627122, 627487, 627852, 628217, 628583, 628948, 629313, 629678, 630044, 630409, 630774, 631139, 631505, 631870, 632235, 632600, 632966, 633331, 633696, 634061, 634427, 634792, 635157, 635522, 635888, 636253, 636618, 636983, 637349, 637714, 638079, 638444, 638810, 639175, 639540, 639905, 640271, 640636, 641001, 641366, 641732, 642097, 642462, 642827, 643193, 643558, 643923, 644288, 644654, 645019, 645384, 645749, 646115, 646480, 646845, 647210, 647576, 647941, 648306, 648671, 649037, 649402, 649767, 650132, 650498, 650863, 651228, 651593, 651959, 652324, 652689, 
653054, 653420, 653785, 654150, 654515, 654881, 655246, 655611, 655976, 656342, 656707, 657072, 657437, 657802, 658167, 658532, 658897, 659263, 659628, 659993, 660358, 660724, 661089, 661454, 661819, 662185, 662550, 662915, 663280, 663646, 664011, 664376, 664741, 665107, 665472, 665837, 666202, 666568, 666933, 667298, 667663, 668029, 668394, 668759, 669124, 669490, 669855, 670220, 670585, 670951, 671316, 671681, 672046, 672412, 672777, 673142, 673507, 673873, 674238, 674603, 674968, 675334, 675699, 676064, 676429, 676795, 677160, 677525, 677890, 678256, 678621, 678986, 679351, 679717, 680082, 680447, 680812, 681178, 681543, 681908, 682273, 682639, 683004, 683369, 683734, 684100, 684465, 684830, 685195, 685561, 685926, 686291, 686656, 687022, 687387, 687752, 688117, 688483, 688848, 689213, 689578, 689944, 690309, 690674, 691039, 691405, 691770, 692135, 692500, 692866, 693231, 693596, 693961, 694326, 694691, 695056, 695421, 695787, 696152, 696517, 696882, 697248, 697613, 697978, 698343, 698709, 699074, 699439, 699804, 700170, 700535, 700900, 701265, 701631, 701996, 702361, 702726, 703092, 703457, 703822, 704187, 704553, 704918, 705283, 705648, 706014, 706379, 706744, 707109, 707475, 707840, 708205, 708570, 708936, 709301, 709666, 710031, 710397, 710762, 711127, 711492, 711858, 712223, 712588, 712953, 713319, 713684, 714049, 714414, 714780, 715145, 715510, 715875, 716241, 716606, 716971, 717336, 717702, 718067, 718432, 718797, 719163, 719528, 719893, 720258, 720624, 720989, 721354, 721719, 722085, 722450, 722815, 723180, 723546, 723911, 724276, 724641, 725007, 725372, 725737, 726102, 726468, 726833, 727198, 727563, 727929, 728294, 728659, 729024, 729390, 729755, 730120, 730485, 730851, 731216, 731581, 731946, 732312, 732677, 733042, 733407, 733773, 734138, 734503, 734868, 735234, 735599, 735964, 736329, 736695, 737060, 737425, 737790, 738156, 738521, 738886, 739251, 739617, 739982, 740347, 740712, 741078, 741443, 741808, 742173, 742539, 742904, 743269, 743634, 744000, 
744365, 744730, 745095, 745461, 745826, 746191, 746556, 746922, 747287, 747652, 748017, 748383, 748748, 749113, 749478, 749844, 750209, 750574, 750939, 751305, 751670, 752035, 752400, 752766, 753131, 753496, 753861, 754227, 754592, 754957, 755322, 755688, 756053, 756418, 756783, 757149, 757514, 757879, 758244, 758610, 758975, 759340, 759705, 760071, 760436, 760801, 761166, 761532, 761897, 762262, 762627, 762993, 763358, 763723, 764088, 764454, 764819, 765184, 765549, 765915, 766280, 766645]
\ No newline at end of file
diff --git a/go/mysql/datetime/time_zone_test.go b/go/mysql/datetime/time_zone_test.go
index 94745d0c71e..4bd1572755f 100644
--- a/go/mysql/datetime/time_zone_test.go
+++ b/go/mysql/datetime/time_zone_test.go
@@ -18,10 +18,50 @@ package datetime
import (
"testing"
+ "time"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
+func TestDST(t *testing.T) {
+ testCases := []struct {
+ time Time
+ year int
+ month time.Month
+ day int
+ tz string
+ expected string
+ }{
+ {
+ time: Time{hour: 130, minute: 34, second: 58},
+ year: 2023, month: 10, day: 24,
+ tz: "Europe/Madrid",
+ expected: "2023-10-29T10:34:58+01:00",
+ },
+ {
+ time: Time{hour: 130, minute: 34, second: 58},
+ year: 2023, month: 10, day: 29,
+ tz: "Europe/Madrid",
+ expected: "2023-11-03T10:34:58+01:00",
+ },
+ {
+ time: Time{hour: 130 | negMask, minute: 34, second: 58},
+ year: 2023, month: 11, day: 03,
+ tz: "Europe/Madrid",
+ expected: "2023-10-28T13:25:02+02:00",
+ },
+ }
+
+ for _, tc := range testCases {
+ tz, err := ParseTimeZone(tc.tz)
+ require.NoError(t, err)
+
+ got := tc.time.toStdTime(tc.year, tc.month, tc.day, tz)
+ assert.Equal(t, tc.expected, got.Format(time.RFC3339))
+ }
+}
+
func TestParseTimeZone(t *testing.T) {
testCases := []struct {
tz string
diff --git a/go/mysql/datetime/timeparts.go b/go/mysql/datetime/timeparts.go
index 32bda00ef43..a774099a93a 100644
--- a/go/mysql/datetime/timeparts.go
+++ b/go/mysql/datetime/timeparts.go
@@ -48,7 +48,7 @@ func (tp *timeparts) toDateTime(prec int) (DateTime, int, bool) {
if tp.yday > 0 {
return DateTime{}, 0, false
} else {
- if tp.month < 0 {
+ if tp.month < 1 {
tp.month = int(time.January)
}
if tp.day < 0 {
@@ -86,3 +86,7 @@ func (tp *timeparts) toDateTime(prec int) (DateTime, int, bool) {
func (tp *timeparts) isZero() bool {
return tp.year == 0 && tp.month == 0 && tp.day == 0 && tp.hour == 0 && tp.min == 0 && tp.sec == 0 && tp.nsec == 0
}
+
+func (tp *timeparts) toSeconds() int {
+ return tp.day*secondsPerDay + tp.hour*3600 + tp.min*60 + tp.sec
+}
diff --git a/go/mysql/decimal/decimal.go b/go/mysql/decimal/decimal.go
index 7293360ee52..a2b505a1232 100644
--- a/go/mysql/decimal/decimal.go
+++ b/go/mysql/decimal/decimal.go
@@ -677,6 +677,10 @@ func (d *Decimal) ensureInitialized() {
}
}
+func (d Decimal) IsInitialized() bool {
+ return d.value != nil
+}
+
// RescalePair rescales two decimals to common exponential value (minimal exp of both decimals)
func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) {
d1.ensureInitialized()
@@ -693,13 +697,6 @@ func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) {
return d1, d2.rescale(baseScale)
}
-func min(x, y int32) int32 {
- if x >= y {
- return y
- }
- return x
-}
-
// largestForm returns the largest decimal that can be represented
// with the given amount of integral and fractional digits
// Example:
diff --git a/go/mysql/decimal/weights.go b/go/mysql/decimal/weights.go
new file mode 100644
index 00000000000..9b8f43a0c65
--- /dev/null
+++ b/go/mysql/decimal/weights.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2023 The Vitess Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package decimal
+
+// Our weight string format is normalizing the weight string to a fixed length,
+// so it becomes byte-ordered. The byte lengths are pre-computed based on
+// https://dev.mysql.com/doc/refman/8.0/en/fixed-point-types.html
+// and generated empirically with a manual loop:
+//
+// for i := 1; i <= 65; i++ {
+// dec, err := NewFromMySQL(bytes.Repeat([]byte("9"), i))
+// if err != nil {
+// t.Fatal(err)
+// }
+//
+// byteLengths = append(byteLengths, len(dec.value.Bytes()))
+// }
+var weightStringLengths = []int{
+ 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7, 7, 8, 8, 8,
+ 9, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13, 13, 14, 14, 15, 15, 15,
+ 16, 16, 17, 17, 18, 18, 18, 19, 19, 20, 20, 20, 21, 21, 22, 22,
+ 23, 23, 23, 24, 24, 25, 25, 25, 26, 26, 27, 27, 27,
+}
+
+func (d Decimal) WeightString(dst []byte, length, precision int32) []byte {
+ dec := d.rescale(-precision)
+ dec = dec.Clamp(length-precision, precision)
+
+ buf := make([]byte, weightStringLengths[length]+1)
+ dec.value.FillBytes(buf[:])
+
+ if dec.value.Sign() < 0 {
+ for i := range buf {
+ buf[i] ^= 0xff
+ }
+ }
+ // Use the same trick as used for signed numbers on the first byte.
+ buf[0] ^= 0x80
+
+ dst = append(dst, buf[:]...)
+ return dst
+}
diff --git a/go/mysql/endtoend/client_test.go b/go/mysql/endtoend/client_test.go
index a48c9629d51..6591c454e8a 100644
--- a/go/mysql/endtoend/client_test.go
+++ b/go/mysql/endtoend/client_test.go
@@ -25,6 +25,8 @@ import (
"github.com/stretchr/testify/assert"
+ "vitess.io/vitess/go/mysql/sqlerror"
+
"github.com/stretchr/testify/require"
"vitess.io/vitess/go/mysql"
@@ -73,9 +75,9 @@ func TestKill(t *testing.T) {
// will differ.
err = <-errChan
if strings.Contains(err.Error(), "EOF") {
- assertSQLError(t, err, mysql.CRServerLost, mysql.SSUnknownSQLState, "EOF", "select sleep(10) from dual")
+ assertSQLError(t, err, sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "EOF", "select sleep(10) from dual")
} else {
- assertSQLError(t, err, mysql.CRServerLost, mysql.SSUnknownSQLState, "", "connection reset by peer")
+ assertSQLError(t, err, sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "", "connection reset by peer")
}
}
@@ -104,7 +106,7 @@ func TestKill2006(t *testing.T) {
// unix socket, we will get a broken pipe when the server
// closes the connection and we are trying to write the command.
_, err = conn.ExecuteFetch("select sleep(10) from dual", 1000, false)
- assertSQLError(t, err, mysql.CRServerGone, mysql.SSUnknownSQLState, "broken pipe", "select sleep(10) from dual")
+ assertSQLError(t, err, sqlerror.CRServerGone, sqlerror.SSUnknownSQLState, "broken pipe", "select sleep(10) from dual")
}
// TestDupEntry tests a duplicate key is properly raised.
@@ -123,7 +125,7 @@ func TestDupEntry(t *testing.T) {
t.Fatalf("first insert failed: %v", err)
}
_, err = conn.ExecuteFetch("insert into dup_entry(id, name) values(2, 10)", 0, false)
- assertSQLError(t, err, mysql.ERDupEntry, mysql.SSConstraintViolation, "Duplicate entry", "insert into dup_entry(id, name) values(2, 10)")
+ assertSQLError(t, err, sqlerror.ERDupEntry, sqlerror.SSConstraintViolation, "Duplicate entry", "insert into dup_entry(id, name) values(2, 10)")
}
// TestClientFoundRows tests if the CLIENT_FOUND_ROWS flag works.
diff --git a/go/mysql/endtoend/main_test.go b/go/mysql/endtoend/main_test.go
index ef7cb671c33..466735c02e4 100644
--- a/go/mysql/endtoend/main_test.go
+++ b/go/mysql/endtoend/main_test.go
@@ -27,6 +27,8 @@ import (
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/mysql/sqlerror"
+
"vitess.io/vitess/go/mysql"
vtenv "vitess.io/vitess/go/vt/env"
"vitess.io/vitess/go/vt/mysqlctl"
@@ -41,11 +43,11 @@ var (
)
// assertSQLError makes sure we get the right error.
-func assertSQLError(t *testing.T, err error, code mysql.ErrorCode, sqlState string, subtext string, query string) {
+func assertSQLError(t *testing.T, err error, code sqlerror.ErrorCode, sqlState string, subtext string, query string) {
t.Helper()
require.Error(t, err, "was expecting SQLError %v / %v / %v but got no error.", code, sqlState, subtext)
- serr, ok := err.(*mysql.SQLError)
+ serr, ok := err.(*sqlerror.SQLError)
require.True(t, ok, "was expecting SQLError %v / %v / %v but got: %v", code, sqlState, subtext, err)
require.Equal(t, code, serr.Num, "was expecting SQLError %v / %v / %v but got code %v", code, sqlState, subtext, serr.Num)
require.Equal(t, sqlState, serr.State, "was expecting SQLError %v / %v / %v but got state %v", code, sqlState, subtext, serr.State)
diff --git a/go/mysql/endtoend/query_test.go b/go/mysql/endtoend/query_test.go
index 7565c2913e9..576960f2acb 100644
--- a/go/mysql/endtoend/query_test.go
+++ b/go/mysql/endtoend/query_test.go
@@ -26,6 +26,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/mysql/collations/colldata"
+
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/mysql/collations"
"vitess.io/vitess/go/sqltypes"
@@ -39,7 +41,7 @@ const (
func columnSize(cs collations.ID, size uint32) uint32 {
// utf8_general_ci results in smaller max column sizes because MySQL 5.7 is silly
- if cs.Get().Charset().Name() == "utf8mb3" {
+ if colldata.Lookup(cs).Charset().Name() == "utf8mb3" {
return size * 3 / 4
}
return size
@@ -321,6 +323,5 @@ func TestSysInfo(t *testing.T) {
func getDefaultCollationID() collations.ID {
collationHandler := collations.Local()
- collation := collationHandler.DefaultCollationForCharset(charsetName)
- return collation.ID()
+ return collationHandler.DefaultCollationForCharset(charsetName)
}
diff --git a/go/mysql/endtoend/replication_test.go b/go/mysql/endtoend/replication_test.go
index 15b966e1feb..0c1fa006347 100644
--- a/go/mysql/endtoend/replication_test.go
+++ b/go/mysql/endtoend/replication_test.go
@@ -29,11 +29,12 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
+ "vitess.io/vitess/go/mysql/sqlerror"
+
"vitess.io/vitess/go/mysql"
"vitess.io/vitess/go/mysql/binlog"
"vitess.io/vitess/go/sqltypes"
querypb "vitess.io/vitess/go/vt/proto/query"
- "vitess.io/vitess/go/vt/vtgate/evalengine"
)
// connectForReplication is a helper method to connect for replication
@@ -70,7 +71,7 @@ func connectForReplication(t *testing.T, rbr bool) (*mysql.Conn, mysql.BinlogFor
t.Fatalf("SHOW MASTER STATUS returned unexpected result: %v", result)
}
file := result.Rows[0][0].ToString()
- position, err := evalengine.ToUint64(result.Rows[0][1])
+ position, err := result.Rows[0][1].ToCastUint64()
require.NoError(t, err, "SHOW MASTER STATUS returned invalid position: %v", result.Rows[0][1])
// Tell the server that we understand the format of events
@@ -126,9 +127,9 @@ func TestReplicationConnectionClosing(t *testing.T) {
for {
data, err := conn.ReadPacket()
if err != nil {
- serr, ok := err.(*mysql.SQLError)
- assert.True(t, ok, "Got a non mysql.SQLError error: %v", err)
- assert.Equal(t, mysql.CRServerLost, serr.Num, "Got an unexpected mysql.SQLError error: %v", serr)
+ serr, ok := err.(*sqlerror.SQLError)
+ assert.True(t, ok, "Got a non sqlerror.SQLError error: %v", err)
+ assert.Equal(t, sqlerror.CRServerLost, serr.Num, "Got an unexpected sqlerror.SQLError error: %v", serr)
// we got the right error, all good.
return
diff --git a/go/mysql/endtoend/schema_change_test.go b/go/mysql/endtoend/schema_change_test.go
index 5fc90e37935..a9e72aaef5b 100644
--- a/go/mysql/endtoend/schema_change_test.go
+++ b/go/mysql/endtoend/schema_change_test.go
@@ -22,7 +22,7 @@ import (
"strings"
"testing"
- "vitess.io/vitess/go/vt/sidecardb"
+ "vitess.io/vitess/go/constants/sidecar"
"vitess.io/vitess/go/vt/sqlparser"
"github.com/stretchr/testify/require"
@@ -42,9 +42,9 @@ func TestChangeSchemaIsNoticed(t *testing.T) {
require.NoError(t, err)
defer conn.Close()
- clearQuery := sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecardb.GetIdentifier()).Query
- insertQuery := sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecardb.GetIdentifier()).Query
- detectQuery := sqlparser.BuildParsedQuery(mysql.DetectSchemaChange, sidecardb.GetIdentifier()).Query
+ clearQuery := sqlparser.BuildParsedQuery(mysql.ClearSchemaCopy, sidecar.GetIdentifier()).Query
+ insertQuery := sqlparser.BuildParsedQuery(mysql.InsertIntoSchemaCopy, sidecar.GetIdentifier()).Query
+ detectQuery := sqlparser.BuildParsedQuery(mysql.DetectSchemaChange, sidecar.GetIdentifier()).Query
tests := []struct {
name string
diff --git a/go/mysql/fakesqldb/server.go b/go/mysql/fakesqldb/server.go
index b305842cfe9..7d3a79eeb85 100644
--- a/go/mysql/fakesqldb/server.go
+++ b/go/mysql/fakesqldb/server.go
@@ -29,6 +29,7 @@ import (
"testing"
"time"
+ "vitess.io/vitess/go/mysql/replication"
"vitess.io/vitess/go/vt/sqlparser"
"vitess.io/vitess/go/vt/log"
@@ -188,7 +189,7 @@ func New(t testing.TB) *DB {
authServer := mysql.NewAuthServerNone()
// Start listening.
- db.listener, err = mysql.NewListener("unix", socketFile, authServer, db, 0, 0, false, false)
+ db.listener, err = mysql.NewListener("unix", socketFile, authServer, db, 0, 0, false, false, 0)
if err != nil {
t.Fatalf("NewListener failed: %v", err)
}
@@ -382,7 +383,7 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R
if db.shouldClose.Load() {
c.Close()
- //log error
+ // log error
if err := callback(&sqltypes.Result{}); err != nil {
log.Errorf("callback failed : %v", err)
}
@@ -393,7 +394,7 @@ func (db *DB) HandleQuery(c *mysql.Conn, query string, callback func(*sqltypes.R
// The driver may send this at connection time, and we don't want it to
// interfere.
if key == "set names utf8" || strings.HasPrefix(key, "set collation_connection = ") {
- //log error
+ // log error
if err := callback(&sqltypes.Result{}); err != nil {
log.Errorf("callback failed : %v", err)
}
@@ -527,7 +528,7 @@ func (db *DB) ComBinlogDump(c *mysql.Conn, logFile string, binlogPos uint32) err
}
// ComBinlogDumpGTID is part of the mysql.Handler interface.
-func (db *DB) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet mysql.GTIDSet) error {
+func (db *DB) ComBinlogDumpGTID(c *mysql.Conn, logFile string, logPos uint64, gtidSet replication.GTIDSet) error {
return nil
}
diff --git a/go/mysql/flavor.go b/go/mysql/flavor.go
index a8ab1dbbcb7..edb64913c31 100644
--- a/go/mysql/flavor.go
+++ b/go/mysql/flavor.go
@@ -23,6 +23,8 @@ import (
"strconv"
"strings"
+ "vitess.io/vitess/go/mysql/replication"
+ "vitess.io/vitess/go/mysql/sqlerror"
"vitess.io/vitess/go/sqltypes"
"vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
@@ -31,7 +33,7 @@ import (
var (
// ErrNotReplica means there is no replication status.
// Returned by ShowReplicationStatus().
- ErrNotReplica = NewSQLError(ERNotReplica, SSUnknownSQLState, "no replication status")
+ ErrNotReplica = sqlerror.NewSQLError(sqlerror.ERNotReplica, sqlerror.SSUnknownSQLState, "no replication status")
// ErrNoPrimaryStatus means no status was returned by ShowPrimaryStatus().
ErrNoPrimaryStatus = errors.New("no master status")
@@ -75,10 +77,10 @@ const (
// 2. MariaDB 10.X
type flavor interface {
// primaryGTIDSet returns the current GTIDSet of a server.
- primaryGTIDSet(c *Conn) (GTIDSet, error)
+ primaryGTIDSet(c *Conn) (replication.GTIDSet, error)
// purgedGTIDSet returns the purged GTIDSet of a server.
- purgedGTIDSet(c *Conn) (GTIDSet, error)
+ purgedGTIDSet(c *Conn) (replication.GTIDSet, error)
// gtidMode returns the gtid mode of a server.
gtidMode(c *Conn) (string, error)
@@ -94,11 +96,11 @@ type flavor interface {
// startReplicationUntilAfter will start replication, but only allow it
// to run until `pos` is reached. After reaching pos, replication will be stopped again
- startReplicationUntilAfter(pos Position) string
+ startReplicationUntilAfter(pos replication.Position) string
// startSQLThreadUntilAfter will start replication's sql thread(s), but only allow it
// to run until `pos` is reached. After reaching pos, it will be stopped again
- startSQLThreadUntilAfter(pos Position) string
+ startSQLThreadUntilAfter(pos replication.Position) string
// stopReplicationCommand returns the command to stop the replication.
stopReplicationCommand() string
@@ -114,7 +116,7 @@ type flavor interface {
// sendBinlogDumpCommand sends the packet required to start
// dumping binlogs from the specified location.
- sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos Position) error
+ sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error
// readBinlogEvent reads the next BinlogEvent from the connection.
readBinlogEvent(c *Conn) (BinlogEvent, error)
@@ -129,7 +131,7 @@ type flavor interface {
// setReplicationPositionCommands returns the commands to set the
// replication position at which the replica will resume.
- setReplicationPositionCommands(pos Position) []string
+ setReplicationPositionCommands(pos replication.Position) []string
// changeReplicationSourceArg returns the specific parameter to add to
// a "change primary" command.
@@ -137,17 +139,17 @@ type flavor interface {
// status returns the result of the appropriate status command,
// with parsed replication position.
- status(c *Conn) (ReplicationStatus, error)
+ status(c *Conn) (replication.ReplicationStatus, error)
// primaryStatus returns the result of 'SHOW MASTER STATUS',
// with parsed executed position.
- primaryStatus(c *Conn) (PrimaryStatus, error)
+ primaryStatus(c *Conn) (replication.PrimaryStatus, error)
// waitUntilPositionCommand returns the SQL command to issue
// to wait until the given position, until the context
// expires. The command returns -1 if it times out. It
// returns NULL if GTIDs are not enabled.
- waitUntilPositionCommand(ctx context.Context, pos Position) (string, error)
+ waitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error)
baseShowTables() string
baseShowTablesWithSizes() string
@@ -265,23 +267,23 @@ func (c *Conn) IsMariaDB() bool {
}
// PrimaryPosition returns the current primary's replication position.
-func (c *Conn) PrimaryPosition() (Position, error) {
+func (c *Conn) PrimaryPosition() (replication.Position, error) {
gtidSet, err := c.flavor.primaryGTIDSet(c)
if err != nil {
- return Position{}, err
+ return replication.Position{}, err
}
- return Position{
+ return replication.Position{
GTIDSet: gtidSet,
}, nil
}
// GetGTIDPurged returns the tablet's GTIDs which are purged.
-func (c *Conn) GetGTIDPurged() (Position, error) {
+func (c *Conn) GetGTIDPurged() (replication.Position, error) {
gtidSet, err := c.flavor.purgedGTIDSet(c)
if err != nil {
- return Position{}, err
+ return replication.Position{}, err
}
- return Position{
+ return replication.Position{
GTIDSet: gtidSet,
}, nil
}
@@ -297,13 +299,13 @@ func (c *Conn) GetServerUUID() (string, error) {
}
// PrimaryFilePosition returns the current primary's file based replication position.
-func (c *Conn) PrimaryFilePosition() (Position, error) {
+func (c *Conn) PrimaryFilePosition() (replication.Position, error) {
filePosFlavor := filePosFlavor{}
gtidSet, err := filePosFlavor.primaryGTIDSet(c)
if err != nil {
- return Position{}, err
+ return replication.Position{}, err
}
- return Position{
+ return replication.Position{
GTIDSet: gtidSet,
}, nil
}
@@ -319,14 +321,14 @@ func (c *Conn) RestartReplicationCommands() []string {
}
// StartReplicationUntilAfterCommand returns the command to start replication.
-func (c *Conn) StartReplicationUntilAfterCommand(pos Position) string {
+func (c *Conn) StartReplicationUntilAfterCommand(pos replication.Position) string {
return c.flavor.startReplicationUntilAfter(pos)
}
// StartSQLThreadUntilAfterCommand returns the command to start the replica's SQL
// thread(s) and have it run until it has reached the given position, at which point
// it will stop.
-func (c *Conn) StartSQLThreadUntilAfterCommand(pos Position) string {
+func (c *Conn) StartSQLThreadUntilAfterCommand(pos replication.Position) string {
return c.flavor.startSQLThreadUntilAfter(pos)
}
@@ -353,7 +355,7 @@ func (c *Conn) StartSQLThreadCommand() string {
// SendBinlogDumpCommand sends the flavor-specific version of
// the COM_BINLOG_DUMP command to start dumping raw binlog
// events over a server connection, starting at a given GTID.
-func (c *Conn) SendBinlogDumpCommand(serverID uint32, binlogFilename string, startPos Position) error {
+func (c *Conn) SendBinlogDumpCommand(serverID uint32, binlogFilename string, startPos replication.Position) error {
return c.flavor.sendBinlogDumpCommand(c, serverID, binlogFilename, startPos)
}
@@ -378,7 +380,7 @@ func (c *Conn) ResetReplicationParametersCommands() []string {
// SetReplicationPositionCommands returns the commands to set the
// replication position at which the replica will resume
// when it is later reparented with SetReplicationSourceCommand.
-func (c *Conn) SetReplicationPositionCommands(pos Position) []string {
+func (c *Conn) SetReplicationPositionCommands(pos replication.Position) []string {
return c.flavor.setReplicationPositionCommands(pos)
}
@@ -433,107 +435,15 @@ func resultToMap(qr *sqltypes.Result) (map[string]string, error) {
return result, nil
}
-// parseReplicationStatus parses the common (non-flavor-specific) fields of ReplicationStatus
-func parseReplicationStatus(fields map[string]string) ReplicationStatus {
- // The field names in the map are identical to what we receive from the database
- // Hence the names still contain Master
- status := ReplicationStatus{
- SourceHost: fields["Master_Host"],
- SourceUser: fields["Master_User"],
- SSLAllowed: fields["Master_SSL_Allowed"] == "Yes",
- AutoPosition: fields["Auto_Position"] == "1",
- UsingGTID: fields["Using_Gtid"] != "No" && fields["Using_Gtid"] != "",
- HasReplicationFilters: (fields["Replicate_Do_DB"] != "") || (fields["Replicate_Ignore_DB"] != "") || (fields["Replicate_Do_Table"] != "") || (fields["Replicate_Ignore_Table"] != "") || (fields["Replicate_Wild_Do_Table"] != "") || (fields["Replicate_Wild_Ignore_Table"] != ""),
- // These fields are returned from the underlying DB and cannot be renamed
- IOState: ReplicationStatusToState(fields["Slave_IO_Running"]),
- LastIOError: fields["Last_IO_Error"],
- SQLState: ReplicationStatusToState(fields["Slave_SQL_Running"]),
- LastSQLError: fields["Last_SQL_Error"],
- }
- parseInt, _ := strconv.ParseInt(fields["Master_Port"], 10, 32)
- status.SourcePort = int32(parseInt)
- parseInt, _ = strconv.ParseInt(fields["Connect_Retry"], 10, 32)
- status.ConnectRetry = int32(parseInt)
- parseUint, err := strconv.ParseUint(fields["Seconds_Behind_Master"], 10, 32)
- if err != nil {
- // we could not parse the value into a valid uint32 -- most commonly because the value is NULL from the
- // database -- so let's reflect that the underlying value was unknown on our last check
- status.ReplicationLagUnknown = true
- } else {
- status.ReplicationLagUnknown = false
- status.ReplicationLagSeconds = uint32(parseUint)
- }
- parseUint, _ = strconv.ParseUint(fields["Master_Server_Id"], 10, 32)
- status.SourceServerID = uint32(parseUint)
- parseUint, _ = strconv.ParseUint(fields["SQL_Delay"], 10, 32)
- status.SQLDelay = uint32(parseUint)
-
- executedPosStr := fields["Exec_Master_Log_Pos"]
- file := fields["Relay_Master_Log_File"]
- if file != "" && executedPosStr != "" {
- filePos, err := strconv.ParseUint(executedPosStr, 10, 32)
- if err == nil {
- status.FilePosition.GTIDSet = filePosGTID{
- file: file,
- pos: uint32(filePos),
- }
- }
- }
-
- readPosStr := fields["Read_Master_Log_Pos"]
- file = fields["Master_Log_File"]
- if file != "" && readPosStr != "" {
- fileRelayPos, err := strconv.ParseUint(readPosStr, 10, 32)
- if err == nil {
- status.RelayLogSourceBinlogEquivalentPosition.GTIDSet = filePosGTID{
- file: file,
- pos: uint32(fileRelayPos),
- }
- }
- }
-
- relayPosStr := fields["Relay_Log_Pos"]
- file = fields["Relay_Log_File"]
- if file != "" && relayPosStr != "" {
- relayFilePos, err := strconv.ParseUint(relayPosStr, 10, 32)
- if err == nil {
- status.RelayLogFilePosition.GTIDSet = filePosGTID{
- file: file,
- pos: uint32(relayFilePos),
- }
- }
- }
- return status
-}
-
// ShowReplicationStatus executes the right command to fetch replication status,
// and returns a parsed Position with other fields.
-func (c *Conn) ShowReplicationStatus() (ReplicationStatus, error) {
+func (c *Conn) ShowReplicationStatus() (replication.ReplicationStatus, error) {
return c.flavor.status(c)
}
-// parsePrimaryStatus parses the common fields of SHOW MASTER STATUS.
-func parsePrimaryStatus(fields map[string]string) PrimaryStatus {
- status := PrimaryStatus{}
-
- fileExecPosStr := fields["Position"]
- file := fields["File"]
- if file != "" && fileExecPosStr != "" {
- filePos, err := strconv.ParseUint(fileExecPosStr, 10, 32)
- if err == nil {
- status.FilePosition.GTIDSet = filePosGTID{
- file: file,
- pos: uint32(filePos),
- }
- }
- }
-
- return status
-}
-
// ShowPrimaryStatus executes the right SHOW MASTER STATUS command,
// and returns a parsed executed Position, as well as file based Position.
-func (c *Conn) ShowPrimaryStatus() (PrimaryStatus, error) {
+func (c *Conn) ShowPrimaryStatus() (replication.PrimaryStatus, error) {
return c.flavor.primaryStatus(c)
}
@@ -541,7 +451,7 @@ func (c *Conn) ShowPrimaryStatus() (PrimaryStatus, error) {
// to wait until the given position, until the context
// expires. The command returns -1 if it times out. It
// returns NULL if GTIDs are not enabled.
-func (c *Conn) WaitUntilPositionCommand(ctx context.Context, pos Position) (string, error) {
+func (c *Conn) WaitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) {
return c.flavor.waitUntilPositionCommand(ctx, pos)
}
@@ -549,7 +459,7 @@ func (c *Conn) WaitUntilPositionCommand(ctx context.Context, pos Position) (stri
// to wait until the given position, until the context
// expires for the file position flavor. The command returns -1 if it times out. It
// returns NULL if GTIDs are not enabled.
-func (c *Conn) WaitUntilFilePositionCommand(ctx context.Context, pos Position) (string, error) {
+func (c *Conn) WaitUntilFilePositionCommand(ctx context.Context, pos replication.Position) (string, error) {
filePosFlavor := filePosFlavor{}
return filePosFlavor.waitUntilPositionCommand(ctx, pos)
}
@@ -568,3 +478,7 @@ func (c *Conn) BaseShowTablesWithSizes() string {
func (c *Conn) SupportsCapability(capability FlavorCapability) (bool, error) {
return c.flavor.supportsCapability(c.ServerVersion, capability)
}
+
+func init() {
+ flavors[replication.FilePosFlavorID] = newFilePosFlavor
+}
diff --git a/go/mysql/flavor_filepos.go b/go/mysql/flavor_filepos.go
index 9c2bdeb7407..bf4076b85b1 100644
--- a/go/mysql/flavor_filepos.go
+++ b/go/mysql/flavor_filepos.go
@@ -20,10 +20,11 @@ import (
"context"
"fmt"
"io"
- "strconv"
"strings"
"time"
+ "vitess.io/vitess/go/mysql/replication"
+ "vitess.io/vitess/go/mysql/sqlerror"
vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
)
@@ -40,7 +41,7 @@ func newFilePosFlavor() flavor {
}
// primaryGTIDSet is part of the Flavor interface.
-func (flv *filePosFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) {
+func (flv *filePosFlavor) primaryGTIDSet(c *Conn) (replication.GTIDSet, error) {
qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */)
if err != nil {
return nil, err
@@ -53,19 +54,11 @@ func (flv *filePosFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) {
if err != nil {
return nil, err
}
- pos, err := strconv.ParseUint(resultMap["Position"], 0, 32)
- if err != nil {
- return nil, fmt.Errorf("invalid FilePos GTID (%v): expecting pos to be an integer", resultMap["Position"])
- }
-
- return filePosGTID{
- file: resultMap["File"],
- pos: uint32(pos),
- }, nil
+ return replication.ParseFilePosGTIDSet(fmt.Sprintf("%s:%s", resultMap["File"], resultMap["Position"]))
}
// purgedGTIDSet is part of the Flavor interface.
-func (flv *filePosFlavor) purgedGTIDSet(c *Conn) (GTIDSet, error) {
+func (flv *filePosFlavor) purgedGTIDSet(c *Conn) (replication.GTIDSet, error) {
return nil, nil
}
@@ -119,14 +112,14 @@ func (flv *filePosFlavor) startSQLThreadCommand() string {
}
// sendBinlogDumpCommand is part of the Flavor interface.
-func (flv *filePosFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos Position) error {
- rpos, ok := startPos.GTIDSet.(filePosGTID)
+func (flv *filePosFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error {
+ rpos, ok := startPos.GTIDSet.(replication.FilePosGTID)
if !ok {
return fmt.Errorf("startPos.GTIDSet is wrong type - expected filePosGTID, got: %#v", startPos.GTIDSet)
}
- flv.file = rpos.file
- return c.WriteComBinlogDump(serverID, rpos.file, rpos.pos, 0)
+ flv.file = rpos.File
+ return c.WriteComBinlogDump(serverID, rpos.File, rpos.Pos, 0)
}
// readBinlogEvent is part of the Flavor interface.
@@ -143,7 +136,7 @@ func (flv *filePosFlavor) readBinlogEvent(c *Conn) (BinlogEvent, error) {
}
switch result[0] {
case EOFPacket:
- return nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", io.EOF)
+ return nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", io.EOF)
case ErrPacket:
return nil, ParseErrorPacket(result)
}
@@ -223,7 +216,7 @@ func (flv *filePosFlavor) resetReplicationParametersCommands(c *Conn) []string {
}
// setReplicationPositionCommands is part of the Flavor interface.
-func (flv *filePosFlavor) setReplicationPositionCommands(pos Position) []string {
+func (flv *filePosFlavor) setReplicationPositionCommands(pos replication.Position) []string {
return []string{
"unsupported",
}
@@ -235,64 +228,47 @@ func (flv *filePosFlavor) changeReplicationSourceArg() string {
}
// status is part of the Flavor interface.
-func (flv *filePosFlavor) status(c *Conn) (ReplicationStatus, error) {
+func (flv *filePosFlavor) status(c *Conn) (replication.ReplicationStatus, error) {
qr, err := c.ExecuteFetch("SHOW SLAVE STATUS", 100, true /* wantfields */)
if err != nil {
- return ReplicationStatus{}, err
+ return replication.ReplicationStatus{}, err
}
if len(qr.Rows) == 0 {
// The query returned no data, meaning the server
// is not configured as a replica.
- return ReplicationStatus{}, ErrNotReplica
+ return replication.ReplicationStatus{}, ErrNotReplica
}
resultMap, err := resultToMap(qr)
if err != nil {
- return ReplicationStatus{}, err
+ return replication.ReplicationStatus{}, err
}
- return parseFilePosReplicationStatus(resultMap)
-}
-
-func parseFilePosReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) {
- status := parseReplicationStatus(resultMap)
-
- status.Position = status.FilePosition
- status.RelayLogPosition = status.RelayLogSourceBinlogEquivalentPosition
-
- return status, nil
+ return replication.ParseFilePosReplicationStatus(resultMap)
}
// primaryStatus is part of the Flavor interface.
-func (flv *filePosFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) {
+func (flv *filePosFlavor) primaryStatus(c *Conn) (replication.PrimaryStatus, error) {
qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */)
if err != nil {
- return PrimaryStatus{}, err
+ return replication.PrimaryStatus{}, err
}
if len(qr.Rows) == 0 {
// The query returned no data. We don't know how this could happen.
- return PrimaryStatus{}, ErrNoPrimaryStatus
+ return replication.PrimaryStatus{}, ErrNoPrimaryStatus
}
resultMap, err := resultToMap(qr)
if err != nil {
- return PrimaryStatus{}, err
+ return replication.PrimaryStatus{}, err
}
- return parseFilePosPrimaryStatus(resultMap)
-}
-
-func parseFilePosPrimaryStatus(resultMap map[string]string) (PrimaryStatus, error) {
- status := parsePrimaryStatus(resultMap)
-
- status.Position = status.FilePosition
-
- return status, nil
+ return replication.ParseFilePosPrimaryStatus(resultMap)
}
// waitUntilPositionCommand is part of the Flavor interface.
-func (flv *filePosFlavor) waitUntilPositionCommand(ctx context.Context, pos Position) (string, error) {
- filePosPos, ok := pos.GTIDSet.(filePosGTID)
+func (flv *filePosFlavor) waitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) {
+ filePosPos, ok := pos.GTIDSet.(replication.FilePosGTID)
if !ok {
return "", fmt.Errorf("Position is not filePos compatible: %#v", pos.GTIDSet)
}
@@ -302,17 +278,17 @@ func (flv *filePosFlavor) waitUntilPositionCommand(ctx context.Context, pos Posi
if timeout <= 0 {
return "", fmt.Errorf("timed out waiting for position %v", pos)
}
- return fmt.Sprintf("SELECT MASTER_POS_WAIT('%s', %d, %.6f)", filePosPos.file, filePosPos.pos, timeout.Seconds()), nil
+ return fmt.Sprintf("SELECT MASTER_POS_WAIT('%s', %d, %.6f)", filePosPos.File, filePosPos.Pos, timeout.Seconds()), nil
}
- return fmt.Sprintf("SELECT MASTER_POS_WAIT('%s', %d)", filePosPos.file, filePosPos.pos), nil
+ return fmt.Sprintf("SELECT MASTER_POS_WAIT('%s', %d)", filePosPos.File, filePosPos.Pos), nil
}
-func (*filePosFlavor) startReplicationUntilAfter(pos Position) string {
+func (*filePosFlavor) startReplicationUntilAfter(pos replication.Position) string {
return "unsupported"
}
-func (*filePosFlavor) startSQLThreadUntilAfter(pos Position) string {
+func (*filePosFlavor) startSQLThreadUntilAfter(pos replication.Position) string {
return "unsupported"
}
diff --git a/go/mysql/flavor_filepos_test.go b/go/mysql/flavor_filepos_test.go
deleted file mode 100644
index be60f6a95a6..00000000000
--- a/go/mysql/flavor_filepos_test.go
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
-Copyright 2020 The Vitess Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package mysql
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-func TestFilePosRetrieveSourceServerId(t *testing.T) {
- resultMap := map[string]string{
- "Master_Server_Id": "1",
- }
-
- want := ReplicationStatus{SourceServerID: 1}
- got, err := parseFilePosReplicationStatus(resultMap)
- require.NoError(t, err)
- assert.Equalf(t, got.SourceServerID, want.SourceServerID, "got SourceServerID: %v; want SourceServerID: %v", got.SourceServerID, want.SourceServerID)
-}
-
-func TestFilePosRetrieveExecutedPosition(t *testing.T) {
- resultMap := map[string]string{
- "Exec_Master_Log_Pos": "1307",
- "Relay_Master_Log_File": "master-bin.000002",
- "Read_Master_Log_Pos": "1308",
- "Master_Log_File": "master-bin.000003",
- "Relay_Log_Pos": "1309",
- "Relay_Log_File": "relay-bin.000004",
- }
-
- want := ReplicationStatus{
- Position: Position{GTIDSet: filePosGTID{file: "master-bin.000002", pos: 1307}},
- RelayLogPosition: Position{GTIDSet: filePosGTID{file: "master-bin.000003", pos: 1308}},
- FilePosition: Position{GTIDSet: filePosGTID{file: "master-bin.000002", pos: 1307}},
- RelayLogSourceBinlogEquivalentPosition: Position{GTIDSet: filePosGTID{file: "master-bin.000003", pos: 1308}},
- RelayLogFilePosition: Position{GTIDSet: filePosGTID{file: "relay-bin.000004", pos: 1309}},
- }
- got, err := parseFilePosReplicationStatus(resultMap)
- require.NoError(t, err)
- assert.Equalf(t, got.Position.GTIDSet, want.Position.GTIDSet, "got Position: %v; want Position: %v", got.Position.GTIDSet, want.Position.GTIDSet)
- assert.Equalf(t, got.RelayLogPosition.GTIDSet, want.RelayLogPosition.GTIDSet, "got RelayLogPosition: %v; want RelayLogPosition: %v", got.RelayLogPosition.GTIDSet, want.RelayLogPosition.GTIDSet)
- assert.Equalf(t, got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet, "got RelayLogFilePosition: %v; want RelayLogFilePosition: %v", got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet)
- assert.Equalf(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet)
- assert.Equalf(t, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet, "got RelayLogSourceBinlogEquivalentPosition: %v; want RelayLogSourceBinlogEquivalentPosition: %v", got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet)
- assert.Equalf(t, got.Position.GTIDSet, got.FilePosition.GTIDSet, "FilePosition and Position don't match when they should for the FilePos flavor")
- assert.Equalf(t, got.RelayLogPosition.GTIDSet, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, "RelayLogPosition and RelayLogSourceBinlogEquivalentPosition don't match when they should for the FilePos flavor")
-}
-
-func TestFilePosShouldGetPosition(t *testing.T) {
- resultMap := map[string]string{
- "Position": "1307",
- "File": "source-bin.000003",
- }
-
- want := PrimaryStatus{
- Position: Position{GTIDSet: filePosGTID{file: "source-bin.000003", pos: 1307}},
- FilePosition: Position{GTIDSet: filePosGTID{file: "source-bin.000003", pos: 1307}},
- }
- got, err := parseFilePosPrimaryStatus(resultMap)
- require.NoError(t, err)
- assert.Equalf(t, got.Position.GTIDSet, want.Position.GTIDSet, "got Position: %v; want Position: %v", got.Position.GTIDSet, want.Position.GTIDSet)
- assert.Equalf(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet)
- assert.Equalf(t, got.Position.GTIDSet, got.FilePosition.GTIDSet, "FilePosition and Position don't match when they should for the FilePos flavor")
-}
diff --git a/go/mysql/flavor_mariadb.go b/go/mysql/flavor_mariadb.go
index 377ede1ecc8..15718542b45 100644
--- a/go/mysql/flavor_mariadb.go
+++ b/go/mysql/flavor_mariadb.go
@@ -18,12 +18,13 @@ limitations under the License.
package mysql
import (
+ "context"
"fmt"
"io"
"time"
- "context"
-
+ "vitess.io/vitess/go/mysql/replication"
+ "vitess.io/vitess/go/mysql/sqlerror"
"vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
)
@@ -41,7 +42,7 @@ var _ flavor = (*mariadbFlavor101)(nil)
var _ flavor = (*mariadbFlavor102)(nil)
// primaryGTIDSet is part of the Flavor interface.
-func (mariadbFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) {
+func (mariadbFlavor) primaryGTIDSet(c *Conn) (replication.GTIDSet, error) {
qr, err := c.ExecuteFetch("SELECT @@GLOBAL.gtid_binlog_pos", 1, false)
if err != nil {
return nil, err
@@ -50,11 +51,11 @@ func (mariadbFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) {
return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected result format for gtid_binlog_pos: %#v", qr)
}
- return parseMariadbGTIDSet(qr.Rows[0][0].ToString())
+ return replication.ParseMariadbGTIDSet(qr.Rows[0][0].ToString())
}
// purgedGTIDSet is part of the Flavor interface.
-func (mariadbFlavor) purgedGTIDSet(c *Conn) (GTIDSet, error) {
+func (mariadbFlavor) purgedGTIDSet(c *Conn) (replication.GTIDSet, error) {
return nil, nil
}
@@ -68,11 +69,11 @@ func (mariadbFlavor) gtidMode(c *Conn) (string, error) {
return "", nil
}
-func (mariadbFlavor) startReplicationUntilAfter(pos Position) string {
+func (mariadbFlavor) startReplicationUntilAfter(pos replication.Position) string {
return fmt.Sprintf("START SLAVE UNTIL master_gtid_pos = \"%s\"", pos)
}
-func (mariadbFlavor) startSQLThreadUntilAfter(pos Position) string {
+func (mariadbFlavor) startSQLThreadUntilAfter(pos replication.Position) string {
return fmt.Sprintf("START SLAVE SQL_THREAD UNTIL master_gtid_pos = \"%s\"", pos)
}
@@ -105,7 +106,7 @@ func (mariadbFlavor) startSQLThreadCommand() string {
}
// sendBinlogDumpCommand is part of the Flavor interface.
-func (mariadbFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos Position) error {
+func (mariadbFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error {
// Tell the server that we understand GTIDs by setting
// mariadb_slave_capability to MARIA_SLAVE_CAPABILITY_GTID = 4 (MariaDB >= 10.0.1).
if _, err := c.ExecuteFetch("SET @mariadb_slave_capability=4", 0, false); err != nil {
@@ -154,7 +155,7 @@ func (mariadbFlavor) resetReplicationParametersCommands(c *Conn) []string {
}
// setReplicationPositionCommands is part of the Flavor interface.
-func (mariadbFlavor) setReplicationPositionCommands(pos Position) []string {
+func (mariadbFlavor) setReplicationPositionCommands(pos replication.Position) []string {
return []string{
// RESET MASTER will clear out gtid_binlog_pos,
// which then guarantees that gtid_current_pos = gtid_slave_pos,
@@ -182,54 +183,42 @@ func (mariadbFlavor) changeReplicationSourceArg() string {
}
// status is part of the Flavor interface.
-func (mariadbFlavor) status(c *Conn) (ReplicationStatus, error) {
+func (mariadbFlavor) status(c *Conn) (replication.ReplicationStatus, error) {
qr, err := c.ExecuteFetch("SHOW ALL SLAVES STATUS", 100, true /* wantfields */)
if err != nil {
- return ReplicationStatus{}, err
+ return replication.ReplicationStatus{}, err
}
if len(qr.Rows) == 0 {
// The query returned no data, meaning the server
// is not configured as a replica.
- return ReplicationStatus{}, ErrNotReplica
+ return replication.ReplicationStatus{}, ErrNotReplica
}
resultMap, err := resultToMap(qr)
if err != nil {
- return ReplicationStatus{}, err
- }
-
- return parseMariadbReplicationStatus(resultMap)
-}
-
-func parseMariadbReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) {
- status := parseReplicationStatus(resultMap)
-
- var err error
- status.Position.GTIDSet, err = parseMariadbGTIDSet(resultMap["Gtid_Slave_Pos"])
- if err != nil {
- return ReplicationStatus{}, vterrors.Wrapf(err, "ReplicationStatus can't parse MariaDB GTID (Gtid_Slave_Pos: %#v)", resultMap["Gtid_Slave_Pos"])
+ return replication.ReplicationStatus{}, err
}
- return status, nil
+ return replication.ParseMariadbReplicationStatus(resultMap)
}
// primaryStatus is part of the Flavor interface.
-func (m mariadbFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) {
+func (m mariadbFlavor) primaryStatus(c *Conn) (replication.PrimaryStatus, error) {
qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */)
if err != nil {
- return PrimaryStatus{}, err
+ return replication.PrimaryStatus{}, err
}
if len(qr.Rows) == 0 {
// The query returned no data. We don't know how this could happen.
- return PrimaryStatus{}, ErrNoPrimaryStatus
+ return replication.PrimaryStatus{}, ErrNoPrimaryStatus
}
resultMap, err := resultToMap(qr)
if err != nil {
- return PrimaryStatus{}, err
+ return replication.PrimaryStatus{}, err
}
- status := parsePrimaryStatus(resultMap)
+ status := replication.ParsePrimaryStatus(resultMap)
status.Position.GTIDSet, err = m.primaryGTIDSet(c)
return status, err
}
@@ -238,7 +227,7 @@ func (m mariadbFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) {
//
// Note: Unlike MASTER_POS_WAIT(), MASTER_GTID_WAIT() will continue waiting even
// if the sql thread stops. If that is a problem, we'll have to change this.
-func (mariadbFlavor) waitUntilPositionCommand(ctx context.Context, pos Position) (string, error) {
+func (mariadbFlavor) waitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) {
if deadline, ok := ctx.Deadline(); ok {
timeout := time.Until(deadline)
if timeout <= 0 {
@@ -260,7 +249,7 @@ func (mariadbFlavor) readBinlogEvent(c *Conn) (BinlogEvent, error) {
}
switch result[0] {
case EOFPacket:
- return nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", io.EOF)
+ return nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", io.EOF)
case ErrPacket:
return nil, ParseErrorPacket(result)
}
diff --git a/go/mysql/flavor_mariadb_test.go b/go/mysql/flavor_mariadb_test.go
index a2741c27148..250d664e4af 100644
--- a/go/mysql/flavor_mariadb_test.go
+++ b/go/mysql/flavor_mariadb_test.go
@@ -17,11 +17,9 @@ limitations under the License.
package mysql
import (
- "fmt"
"testing"
"github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
)
func TestMariadbSetReplicationSourceCommand(t *testing.T) {
@@ -77,51 +75,3 @@ func TestMariadbSetReplicationSourceCommandSSL(t *testing.T) {
assert.Equal(t, want, got, "mariadbFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want)
}
-
-func TestMariadbRetrieveSourceServerId(t *testing.T) {
- resultMap := map[string]string{
- "Master_Server_Id": "1",
- "Gtid_Slave_Pos": "0-101-2320",
- }
-
- want := ReplicationStatus{SourceServerID: 1}
- got, err := parseMariadbReplicationStatus(resultMap)
- require.NoError(t, err)
- assert.Equal(t, got.SourceServerID, want.SourceServerID, fmt.Sprintf("got SourceServerID: %v; want SourceServerID: %v", got.SourceServerID, want.SourceServerID))
-}
-
-func TestMariadbRetrieveFileBasedPositions(t *testing.T) {
- resultMap := map[string]string{
- "Exec_Master_Log_Pos": "1307",
- "Relay_Master_Log_File": "master-bin.000002",
- "Read_Master_Log_Pos": "1308",
- "Master_Log_File": "master-bin.000003",
- "Gtid_Slave_Pos": "0-101-2320",
- "Relay_Log_Pos": "1309",
- "Relay_Log_File": "relay-bin.000004",
- }
-
- want := ReplicationStatus{
- FilePosition: Position{GTIDSet: filePosGTID{file: "master-bin.000002", pos: 1307}},
- RelayLogSourceBinlogEquivalentPosition: Position{GTIDSet: filePosGTID{file: "master-bin.000003", pos: 1308}},
- RelayLogFilePosition: Position{GTIDSet: filePosGTID{file: "relay-bin.000004", pos: 1309}},
- }
- got, err := parseMariadbReplicationStatus(resultMap)
- require.NoError(t, err)
- assert.Equalf(t, got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet, "got RelayLogFilePosition: %v; want RelayLogFilePosition: %v", got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet)
- assert.Equal(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, fmt.Sprintf("got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet))
- assert.Equal(t, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet, fmt.Sprintf("got RelayLogSourceBinlogEquivalentPosition: %v; want RelayLogSourceBinlogEquivalentPosition: %v", got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet))
-}
-
-func TestMariadbShouldGetNilRelayLogPosition(t *testing.T) {
- resultMap := map[string]string{
- "Exec_Master_Log_Pos": "1307",
- "Relay_Master_Log_File": "master-bin.000002",
- "Read_Master_Log_Pos": "1308",
- "Master_Log_File": "master-bin.000003",
- "Gtid_Slave_Pos": "0-101-2320",
- }
- got, err := parseMariadbReplicationStatus(resultMap)
- require.NoError(t, err)
- assert.Truef(t, got.RelayLogPosition.IsZero(), "Got a filled in RelayLogPosition. For MariaDB we should get back nil, because MariaDB does not return the retrieved GTIDSet. got: %#v", got.RelayLogPosition)
-}
diff --git a/go/mysql/flavor_mysql.go b/go/mysql/flavor_mysql.go
index 388986e96fe..bc5f31006e5 100644
--- a/go/mysql/flavor_mysql.go
+++ b/go/mysql/flavor_mysql.go
@@ -22,6 +22,8 @@ import (
"io"
"time"
+ "vitess.io/vitess/go/mysql/replication"
+ "vitess.io/vitess/go/mysql/sqlerror"
"vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
)
@@ -43,7 +45,7 @@ var _ flavor = (*mysqlFlavor57)(nil)
var _ flavor = (*mysqlFlavor80)(nil)
// primaryGTIDSet is part of the Flavor interface.
-func (mysqlFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) {
+func (mysqlFlavor) primaryGTIDSet(c *Conn) (replication.GTIDSet, error) {
// keep @@global as lowercase, as some servers like the Ripple binlog server only honors a lowercase `global` value
qr, err := c.ExecuteFetch("SELECT @@global.gtid_executed", 1, false)
if err != nil {
@@ -52,11 +54,11 @@ func (mysqlFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) {
if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 {
return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected result format for gtid_executed: %#v", qr)
}
- return ParseMysql56GTIDSet(qr.Rows[0][0].ToString())
+ return replication.ParseMysql56GTIDSet(qr.Rows[0][0].ToString())
}
// purgedGTIDSet is part of the Flavor interface.
-func (mysqlFlavor) purgedGTIDSet(c *Conn) (GTIDSet, error) {
+func (mysqlFlavor) purgedGTIDSet(c *Conn) (replication.GTIDSet, error) {
// keep @@global as lowercase, as some servers like the Ripple binlog server only honors a lowercase `global` value
qr, err := c.ExecuteFetch("SELECT @@global.gtid_purged", 1, false)
if err != nil {
@@ -65,7 +67,7 @@ func (mysqlFlavor) purgedGTIDSet(c *Conn) (GTIDSet, error) {
if len(qr.Rows) != 1 || len(qr.Rows[0]) != 1 {
return nil, vterrors.Errorf(vtrpc.Code_INTERNAL, "unexpected result format for gtid_purged: %#v", qr)
}
- return ParseMysql56GTIDSet(qr.Rows[0][0].ToString())
+ return replication.ParseMysql56GTIDSet(qr.Rows[0][0].ToString())
}
// serverUUID is part of the Flavor interface.
@@ -105,11 +107,11 @@ func (mysqlFlavor) restartReplicationCommands() []string {
}
}
-func (mysqlFlavor) startReplicationUntilAfter(pos Position) string {
+func (mysqlFlavor) startReplicationUntilAfter(pos replication.Position) string {
return fmt.Sprintf("START SLAVE UNTIL SQL_AFTER_GTIDS = '%s'", pos)
}
-func (mysqlFlavor) startSQLThreadUntilAfter(pos Position) string {
+func (mysqlFlavor) startSQLThreadUntilAfter(pos replication.Position) string {
return fmt.Sprintf("START SLAVE SQL_THREAD UNTIL SQL_AFTER_GTIDS = '%s'", pos)
}
@@ -130,8 +132,8 @@ func (mysqlFlavor) startSQLThreadCommand() string {
}
// sendBinlogDumpCommand is part of the Flavor interface.
-func (mysqlFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos Position) error {
- gtidSet, ok := startPos.GTIDSet.(Mysql56GTIDSet)
+func (mysqlFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, binlogFilename string, startPos replication.Position) error {
+ gtidSet, ok := startPos.GTIDSet.(replication.Mysql56GTIDSet)
if !ok {
return vterrors.Errorf(vtrpc.Code_INTERNAL, "startPos.GTIDSet is wrong type - expected Mysql56GTIDSet, got: %#v", startPos.GTIDSet)
}
@@ -163,7 +165,7 @@ func (mysqlFlavor) resetReplicationParametersCommands(c *Conn) []string {
}
// setReplicationPositionCommands is part of the Flavor interface.
-func (mysqlFlavor) setReplicationPositionCommands(pos Position) []string {
+func (mysqlFlavor) setReplicationPositionCommands(pos replication.Position) []string {
return []string{
"RESET MASTER", // We must clear gtid_executed before setting gtid_purged.
fmt.Sprintf("SET GLOBAL gtid_purged = '%s'", pos),
@@ -176,88 +178,46 @@ func (mysqlFlavor) changeReplicationSourceArg() string {
}
// status is part of the Flavor interface.
-func (mysqlFlavor) status(c *Conn) (ReplicationStatus, error) {
+func (mysqlFlavor) status(c *Conn) (replication.ReplicationStatus, error) {
qr, err := c.ExecuteFetch("SHOW SLAVE STATUS", 100, true /* wantfields */)
if err != nil {
- return ReplicationStatus{}, err
+ return replication.ReplicationStatus{}, err
}
if len(qr.Rows) == 0 {
// The query returned no data, meaning the server
// is not configured as a replica.
- return ReplicationStatus{}, ErrNotReplica
+ return replication.ReplicationStatus{}, ErrNotReplica
}
resultMap, err := resultToMap(qr)
if err != nil {
- return ReplicationStatus{}, err
+ return replication.ReplicationStatus{}, err
}
- return parseMysqlReplicationStatus(resultMap)
-}
-
-func parseMysqlReplicationStatus(resultMap map[string]string) (ReplicationStatus, error) {
- status := parseReplicationStatus(resultMap)
- uuidString := resultMap["Master_UUID"]
- if uuidString != "" {
- sid, err := ParseSID(uuidString)
- if err != nil {
- return ReplicationStatus{}, vterrors.Wrapf(err, "cannot decode SourceUUID")
- }
- status.SourceUUID = sid
- }
-
- var err error
- status.Position.GTIDSet, err = ParseMysql56GTIDSet(resultMap["Executed_Gtid_Set"])
- if err != nil {
- return ReplicationStatus{}, vterrors.Wrapf(err, "ReplicationStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v)", resultMap["Executed_Gtid_Set"])
- }
- relayLogGTIDSet, err := ParseMysql56GTIDSet(resultMap["Retrieved_Gtid_Set"])
- if err != nil {
- return ReplicationStatus{}, vterrors.Wrapf(err, "ReplicationStatus can't parse MySQL 5.6 GTID (Retrieved_Gtid_Set: %#v)", resultMap["Retrieved_Gtid_Set"])
- }
- // We take the union of the executed and retrieved gtidset, because the retrieved gtidset only represents GTIDs since
- // the relay log has been reset. To get the full Position, we need to take a union of executed GTIDSets, since these would
- // have been in the relay log's GTIDSet in the past, prior to a reset.
- status.RelayLogPosition.GTIDSet = status.Position.GTIDSet.Union(relayLogGTIDSet)
-
- return status, nil
+ return replication.ParseMysqlReplicationStatus(resultMap)
}
// primaryStatus is part of the Flavor interface.
-func (mysqlFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) {
+func (mysqlFlavor) primaryStatus(c *Conn) (replication.PrimaryStatus, error) {
qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */)
if err != nil {
- return PrimaryStatus{}, err
+ return replication.PrimaryStatus{}, err
}
if len(qr.Rows) == 0 {
// The query returned no data. We don't know how this could happen.
- return PrimaryStatus{}, ErrNoPrimaryStatus
+ return replication.PrimaryStatus{}, ErrNoPrimaryStatus
}
resultMap, err := resultToMap(qr)
if err != nil {
- return PrimaryStatus{}, err
+ return replication.PrimaryStatus{}, err
}
- return parseMysqlPrimaryStatus(resultMap)
+ return replication.ParseMysqlPrimaryStatus(resultMap)
}
-func parseMysqlPrimaryStatus(resultMap map[string]string) (PrimaryStatus, error) {
- status := parsePrimaryStatus(resultMap)
-
- var err error
- status.Position.GTIDSet, err = ParseMysql56GTIDSet(resultMap["Executed_Gtid_Set"])
- if err != nil {
- return PrimaryStatus{}, vterrors.Wrapf(err, "PrimaryStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v)", resultMap["Executed_Gtid_Set"])
- }
-
- return status, nil
-}
-
-// waitUntilPositionCommand is part of the Flavor interface.
-
// waitUntilPositionCommand is part of the Flavor interface.
-func (mysqlFlavor) waitUntilPositionCommand(ctx context.Context, pos Position) (string, error) {
+func (mysqlFlavor) waitUntilPositionCommand(ctx context.Context, pos replication.Position) (string, error) {
// A timeout of 0 means wait indefinitely.
timeoutSeconds := 0
if deadline, ok := ctx.Deadline(); ok {
@@ -285,7 +245,7 @@ func (mysqlFlavor) readBinlogEvent(c *Conn) (BinlogEvent, error) {
}
switch result[0] {
case EOFPacket:
- return nil, NewSQLError(CRServerLost, SSUnknownSQLState, "%v", io.EOF)
+ return nil, sqlerror.NewSQLError(sqlerror.CRServerLost, sqlerror.SSUnknownSQLState, "%v", io.EOF)
case ErrPacket:
return nil, ParseErrorPacket(result)
}
@@ -361,7 +321,7 @@ const TablesWithSize80 = `SELECT t.table_name,
i.allocated_size
FROM information_schema.tables t
LEFT JOIN information_schema.innodb_tablespaces i
- ON i.name = CONCAT(t.table_schema, '/', t.table_name) COLLATE utf8_general_ci
+ ON i.name = CONCAT(t.table_schema, '/', t.table_name) COLLATE utf8mb3_general_ci
WHERE
t.table_schema = database() AND not t.create_options <=> 'partitioned'
UNION ALL
@@ -374,7 +334,7 @@ UNION ALL
SUM(i.allocated_size)
FROM information_schema.tables t
LEFT JOIN information_schema.innodb_tablespaces i
- ON i.name LIKE (CONCAT(t.table_schema, '/', t.table_name, '#p#%') COLLATE utf8_general_ci )
+ ON i.name LIKE (CONCAT(t.table_schema, '/', t.table_name, '#p#%') COLLATE utf8mb3_general_ci )
WHERE
t.table_schema = database() AND t.create_options <=> 'partitioned'
GROUP BY
diff --git a/go/mysql/flavor_mysql_test.go b/go/mysql/flavor_mysql_test.go
index 75d6a3ebc65..0e1b749633a 100644
--- a/go/mysql/flavor_mysql_test.go
+++ b/go/mysql/flavor_mysql_test.go
@@ -20,7 +20,6 @@ import (
"testing"
"github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
)
func TestMysql56SetReplicationSourceCommand(t *testing.T) {
@@ -76,74 +75,3 @@ func TestMysql56SetReplicationSourceCommandSSL(t *testing.T) {
assert.Equal(t, want, got, "mysqlFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, host, port, connectRetry, got, want)
}
-
-func TestMysqlRetrieveSourceServerId(t *testing.T) {
- resultMap := map[string]string{
- "Master_Server_Id": "1",
- }
-
- want := ReplicationStatus{SourceServerID: 1}
- got, err := parseMysqlReplicationStatus(resultMap)
- require.NoError(t, err)
- assert.Equalf(t, got.SourceServerID, want.SourceServerID, "got SourceServerID: %v; want SourceServerID: %v", got.SourceServerID, want.SourceServerID)
-}
-
-func TestMysqlRetrieveFileBasedPositions(t *testing.T) {
- resultMap := map[string]string{
- "Exec_Master_Log_Pos": "1307",
- "Relay_Master_Log_File": "master-bin.000002",
- "Read_Master_Log_Pos": "1308",
- "Master_Log_File": "master-bin.000003",
- "Relay_Log_Pos": "1309",
- "Relay_Log_File": "relay-bin.000004",
- }
-
- want := ReplicationStatus{
- FilePosition: Position{GTIDSet: filePosGTID{file: "master-bin.000002", pos: 1307}},
- RelayLogSourceBinlogEquivalentPosition: Position{GTIDSet: filePosGTID{file: "master-bin.000003", pos: 1308}},
- RelayLogFilePosition: Position{GTIDSet: filePosGTID{file: "relay-bin.000004", pos: 1309}},
- }
- got, err := parseMysqlReplicationStatus(resultMap)
- require.NoError(t, err)
- assert.Equalf(t, got.FilePosition.GTIDSet, want.FilePosition.GTIDSet, "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet)
- assert.Equalf(t, got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet, "got RelayLogFilePosition: %v; want RelayLogFilePosition: %v", got.RelayLogFilePosition.GTIDSet, want.RelayLogFilePosition.GTIDSet)
- assert.Equalf(t, got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet, "got RelayLogSourceBinlogEquivalentPosition: %v; want RelayLogSourceBinlogEquivalentPosition: %v", got.RelayLogSourceBinlogEquivalentPosition.GTIDSet, want.RelayLogSourceBinlogEquivalentPosition.GTIDSet)
-}
-
-func TestMysqlShouldGetRelayLogPosition(t *testing.T) {
- resultMap := map[string]string{
- "Executed_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5",
- "Retrieved_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:6-9",
- "Exec_Master_Log_Pos": "1307",
- "Relay_Master_Log_File": "master-bin.000002",
- "Read_Master_Log_Pos": "1308",
- "Master_Log_File": "master-bin.000003",
- }
-
- sid, _ := ParseSID("3e11fa47-71ca-11e1-9e33-c80aa9429562")
- want := ReplicationStatus{
- Position: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 5}}}},
- RelayLogPosition: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 9}}}},
- }
- got, err := parseMysqlReplicationStatus(resultMap)
- require.NoError(t, err)
- assert.Equalf(t, got.RelayLogPosition.GTIDSet.String(), want.RelayLogPosition.GTIDSet.String(), "got RelayLogPosition: %v; want RelayLogPosition: %v", got.RelayLogPosition.GTIDSet, want.RelayLogPosition.GTIDSet)
-}
-
-func TestMysqlShouldGetPosition(t *testing.T) {
- resultMap := map[string]string{
- "Executed_Gtid_Set": "3e11fa47-71ca-11e1-9e33-c80aa9429562:1-5",
- "Position": "1307",
- "File": "source-bin.000003",
- }
-
- sid, _ := ParseSID("3e11fa47-71ca-11e1-9e33-c80aa9429562")
- want := PrimaryStatus{
- Position: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 5}}}},
- FilePosition: Position{GTIDSet: filePosGTID{file: "source-bin.000003", pos: 1307}},
- }
- got, err := parseMysqlPrimaryStatus(resultMap)
- require.NoError(t, err)
- assert.Equalf(t, got.Position.GTIDSet.String(), want.Position.GTIDSet.String(), "got Position: %v; want Position: %v", got.Position.GTIDSet, want.Position.GTIDSet)
- assert.Equalf(t, got.FilePosition.GTIDSet.String(), want.FilePosition.GTIDSet.String(), "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet)
-}
diff --git a/go/mysql/flavor_mysqlgr.go b/go/mysql/flavor_mysqlgr.go
index 33bd1e6e3e1..e96a6433f73 100644
--- a/go/mysql/flavor_mysqlgr.go
+++ b/go/mysql/flavor_mysqlgr.go
@@ -21,6 +21,7 @@ import (
"fmt"
"math"
+ "vitess.io/vitess/go/mysql/replication"
"vitess.io/vitess/go/vt/proto/vtrpc"
"vitess.io/vitess/go/vt/vterrors"
@@ -57,12 +58,12 @@ func (mysqlGRFlavor) restartReplicationCommands() []string {
}
// startReplicationUntilAfter is disabled in mysqlGRFlavor
-func (mysqlGRFlavor) startReplicationUntilAfter(pos Position) string {
+func (mysqlGRFlavor) startReplicationUntilAfter(pos replication.Position) string {
return ""
}
// startSQLThreadUntilAfter is disabled in mysqlGRFlavor
-func (mysqlGRFlavor) startSQLThreadUntilAfter(pos Position) string {
+func (mysqlGRFlavor) startSQLThreadUntilAfter(pos replication.Position) string {
return ""
}
@@ -99,7 +100,7 @@ func (mysqlGRFlavor) resetReplicationParametersCommands(c *Conn) []string {
}
// setReplicationPositionCommands is disabled in mysqlGRFlavor
-func (mysqlGRFlavor) setReplicationPositionCommands(pos Position) []string {
+func (mysqlGRFlavor) setReplicationPositionCommands(pos replication.Position) []string {
return []string{}
}
@@ -110,8 +111,8 @@ func (mysqlGRFlavor) setReplicationPositionCommands(pos Position) []string {
// TODO: Right now the GR's lag is defined as the lag between a node processing a txn
// and the time the txn was committed. We should consider reporting lag between current queueing txn timestamp
// from replication_connection_status and the current processing txn's commit timestamp
-func (mysqlGRFlavor) status(c *Conn) (ReplicationStatus, error) {
- res := ReplicationStatus{}
+func (mysqlGRFlavor) status(c *Conn) (replication.ReplicationStatus, error) {
+ res := replication.ReplicationStatus{}
// Get primary node information
query := `SELECT
MEMBER_HOST,
@@ -125,7 +126,7 @@ func (mysqlGRFlavor) status(c *Conn) (ReplicationStatus, error) {
return nil
})
if err != nil {
- return ReplicationStatus{}, err
+ return replication.ReplicationStatus{}, err
}
query = `SELECT
@@ -148,7 +149,7 @@ func (mysqlGRFlavor) status(c *Conn) (ReplicationStatus, error) {
return nil
})
if err != nil {
- return ReplicationStatus{}, err
+ return replication.ReplicationStatus{}, err
}
// if chanel is not set, it means the state is not ONLINE or RECOVERING
// return partial result early
@@ -160,26 +161,26 @@ func (mysqlGRFlavor) status(c *Conn) (ReplicationStatus, error) {
query = fmt.Sprintf(`SELECT SERVICE_STATE
FROM performance_schema.replication_connection_status
WHERE CHANNEL_NAME='%s'`, chanel)
- var connectionState ReplicationState
+ var connectionState replication.ReplicationState
err = fetchStatusForGroupReplication(c, query, func(values []sqltypes.Value) error {
- connectionState = ReplicationStatusToState(values[0].ToString())
+ connectionState = replication.ReplicationStatusToState(values[0].ToString())
return nil
})
if err != nil {
- return ReplicationStatus{}, err
+ return replication.ReplicationStatus{}, err
}
res.IOState = connectionState
// Populate SQLState from replication_connection_status
- var applierState ReplicationState
+ var applierState replication.ReplicationState
query = fmt.Sprintf(`SELECT SERVICE_STATE
FROM performance_schema.replication_applier_status_by_coordinator
WHERE CHANNEL_NAME='%s'`, chanel)
err = fetchStatusForGroupReplication(c, query, func(values []sqltypes.Value) error {
- applierState = ReplicationStatusToState(values[0].ToString())
+ applierState = replication.ReplicationStatusToState(values[0].ToString())
return nil
})
if err != nil {
- return ReplicationStatus{}, err
+ return replication.ReplicationStatus{}, err
}
res.SQLState = applierState
@@ -197,17 +198,17 @@ func (mysqlGRFlavor) status(c *Conn) (ReplicationStatus, error) {
return nil
})
if err != nil {
- return ReplicationStatus{}, err
+ return replication.ReplicationStatus{}, err
}
return res, nil
}
-func parsePrimaryGroupMember(res *ReplicationStatus, row []sqltypes.Value) {
+func parsePrimaryGroupMember(res *replication.ReplicationStatus, row []sqltypes.Value) {
res.SourceHost = row[0].ToString() /* MEMBER_HOST */
res.SourcePort, _ = row[1].ToInt32() /* MEMBER_PORT */
}
-func parseReplicationApplierLag(res *ReplicationStatus, row []sqltypes.Value) {
+func parseReplicationApplierLag(res *replication.ReplicationStatus, row []sqltypes.Value) {
lagSec, err := row[0].ToUint32()
// if the error is not nil, ReplicationLagSeconds will remain to be MaxUint32
if err == nil {
@@ -234,7 +235,7 @@ func fetchStatusForGroupReplication(c *Conn, query string, onResult func([]sqlty
// primaryStatus returns the result of 'SHOW MASTER STATUS',
// with parsed executed position.
-func (mysqlGRFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) {
+func (mysqlGRFlavor) primaryStatus(c *Conn) (replication.PrimaryStatus, error) {
return mysqlFlavor{}.primaryStatus(c)
}
diff --git a/go/mysql/flavor_mysqlgr_test.go b/go/mysql/flavor_mysqlgr_test.go
index 6b15ee5048e..df7876eca1c 100644
--- a/go/mysql/flavor_mysqlgr_test.go
+++ b/go/mysql/flavor_mysqlgr_test.go
@@ -20,12 +20,14 @@ import (
"gotest.tools/assert"
+ "vitess.io/vitess/go/mysql/replication"
+
"vitess.io/vitess/go/sqltypes"
querypb "vitess.io/vitess/go/vt/proto/query"
)
func TestMysqlGRParsePrimaryGroupMember(t *testing.T) {
- res := ReplicationStatus{}
+ res := replication.ReplicationStatus{}
rows := []sqltypes.Value{
sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("host1")),
sqltypes.MakeTrusted(querypb.Type_INT32, []byte("10")),
@@ -33,12 +35,12 @@ func TestMysqlGRParsePrimaryGroupMember(t *testing.T) {
parsePrimaryGroupMember(&res, rows)
assert.Equal(t, "host1", res.SourceHost)
assert.Equal(t, int32(10), res.SourcePort)
- assert.Equal(t, ReplicationStateUnknown, res.IOState)
- assert.Equal(t, ReplicationStateUnknown, res.SQLState)
+ assert.Equal(t, replication.ReplicationStateUnknown, res.IOState)
+ assert.Equal(t, replication.ReplicationStateUnknown, res.SQLState)
}
func TestMysqlGRReplicationApplierLagParse(t *testing.T) {
- res := ReplicationStatus{}
+ res := replication.ReplicationStatus{}
row := []sqltypes.Value{
sqltypes.MakeTrusted(querypb.Type_INT32, []byte("NULL")),
}
diff --git a/go/mysql/handshake_test.go b/go/mysql/handshake_test.go
index b6532f830b3..c2b27d6f6d4 100644
--- a/go/mysql/handshake_test.go
+++ b/go/mysql/handshake_test.go
@@ -45,7 +45,7 @@ func TestClearTextClientAuth(t *testing.T) {
defer authServer.close()
// Create the listener.
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false)
+ l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0)
require.NoError(t, err, "NewListener failed: %v", err)
defer l.Close()
host := l.Addr().(*net.TCPAddr).IP.String()
@@ -99,7 +99,7 @@ func TestSSLConnection(t *testing.T) {
defer authServer.close()
// Create the listener, so we can get its host.
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false)
+ l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0)
require.NoError(t, err, "NewListener failed: %v", err)
defer l.Close()
host := l.Addr().(*net.TCPAddr).IP.String()
diff --git a/go/mysql/hex/hex.go b/go/mysql/hex/hex.go
index 941cc08e5b5..d2aa00d592e 100644
--- a/go/mysql/hex/hex.go
+++ b/go/mysql/hex/hex.go
@@ -71,10 +71,10 @@ func DecodedLen(src []byte) int {
return (len(src) + 1) / 2
}
-func DecodeBytes(dst, src []byte) bool {
+func DecodeBytes(dst, src []byte) error {
if len(src)&1 == 1 {
src = append([]byte{'0'}, src...)
}
_, err := hex.Decode(dst, src)
- return err == nil
+ return err
}
diff --git a/go/mysql/icuregex/compiler.go b/go/mysql/icuregex/compiler.go
new file mode 100644
index 00000000000..971cd439fb3
--- /dev/null
+++ b/go/mysql/icuregex/compiler.go
@@ -0,0 +1,3646 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package icuregex
+
+import (
+ "math"
+ "slices"
+ "strings"
+ "unicode/utf8"
+
+ "vitess.io/vitess/go/mysql/icuregex/internal/pattern"
+ "vitess.io/vitess/go/mysql/icuregex/internal/ucase"
+ "vitess.io/vitess/go/mysql/icuregex/internal/uchar"
+ "vitess.io/vitess/go/mysql/icuregex/internal/unames"
+ "vitess.io/vitess/go/mysql/icuregex/internal/uprops"
+ "vitess.io/vitess/go/mysql/icuregex/internal/uset"
+ "vitess.io/vitess/go/mysql/icuregex/internal/utf16"
+)
+
+const BreakIteration = false
+const stackSize = 100
+
+type reChar struct {
+ char rune
+ quoted bool
+}
+
+const (
+ parenPlain = -1
+ parenCapturing = -2
+ parenAtomic = -3
+ parenLookahead = -4
+ parenNegLookahead = -5
+ parenFlags = -6
+ parenLookBehind = -7
+ parenLookBehindN = -8
+)
+
+type setOperation uint32
+
+const (
+ setStart setOperation = 0<<16 | 1
+ setEnd setOperation = 1<<16 | 2
+ setNegation setOperation = 2<<16 | 3
+ setCaseClose setOperation = 2<<16 | 9
+ setDifference2 setOperation = 3<<16 | 4 // '--' set difference operator
+ setIntersection2 setOperation = 3<<16 | 5 // '&&' set intersection operator
+ setUnion setOperation = 4<<16 | 6 // implicit union of adjacent items
+ setDifference1 setOperation = 4<<16 | 7 // '-', single dash difference op, for compatibility with old UnicodeSet.
+ setIntersection1 setOperation = 4<<16 | 8 // '&', single amp intersection op, for compatibility with old UnicodeSet.
+)
+
+type compiler struct {
+ err error
+ out *Pattern
+ p []rune
+
+ scanIndex int
+ quoteMode bool
+ inBackslashQuote bool
+ eolComments bool
+
+ lineNum int
+ charNum int
+ lastChar rune
+ peekChar rune
+
+ c reChar
+ stack [stackSize]uint16
+ stackPtr int
+
+ modeFlags RegexpFlag
+ newModeFlags RegexpFlag
+ setModeFlag bool
+
+ literalChars []rune
+
+ parenStack []int
+ matchOpenParen int
+ matchCloseParen int
+
+ intervalLow int
+ intervalUpper int
+
+ setStack []*uset.UnicodeSet
+ setOpStack []setOperation
+
+ lastSetLiteral rune
+ captureName *strings.Builder
+}
+
+func newCompiler(pat *Pattern) *compiler {
+ return &compiler{
+ out: pat,
+ scanIndex: 0,
+ eolComments: true,
+ lineNum: 1,
+ charNum: 0,
+ lastChar: -1,
+ peekChar: -1,
+ modeFlags: RegexpFlag(uint32(pat.flags) | 0x80000000),
+ matchOpenParen: -1,
+ matchCloseParen: -1,
+ lastSetLiteral: -1,
+ }
+}
+
+func (c *compiler) nextCharLL() (ch rune) {
+ if c.peekChar != -1 {
+ ch, c.peekChar = c.peekChar, -1
+ return
+ }
+ if len(c.p) == 0 {
+ return -1
+ }
+
+ ch = c.p[0]
+ c.p = c.p[1:]
+ if ch == utf8.RuneError {
+ return -1
+ }
+
+ if ch == chCR || ch == chNEL || ch == chLS || (ch == chLF && c.lastChar != chCR) {
+ c.lineNum++
+ c.charNum = 0
+ } else {
+ if ch != chLF {
+ c.charNum++
+ }
+ }
+ c.lastChar = ch
+ return
+}
+
+func (c *compiler) peekCharLL() rune {
+ if c.peekChar == -1 {
+ c.peekChar = c.nextCharLL()
+ }
+ return c.peekChar
+}
+
+func (c *compiler) nextChar(ch *reChar) {
+ c.scanIndex++
+ ch.char = c.nextCharLL()
+ ch.quoted = false
+
+ if c.quoteMode {
+ ch.quoted = true
+ if (ch.char == chBackSlash && c.peekCharLL() == chE && ((c.modeFlags & Literal) == 0)) ||
+ ch.char == -1 {
+ c.quoteMode = false // Exit quote mode,
+ c.nextCharLL() // discard the E
+ c.nextChar(ch)
+ return
+ }
+ } else if c.inBackslashQuote {
+ // The current character immediately follows a '\'
+ // Don't check for any further escapes, just return it as-is.
+ // Don't set c.fQuoted, because that would prevent the state machine from
+ // dispatching on the character.
+ c.inBackslashQuote = false
+ } else {
+ // We are not in a \Q quoted region \E of the source.
+ //
+ if (c.modeFlags & Comments) != 0 {
+ //
+ // We are in free-spacing and comments mode.
+ // Scan through any white space and comments, until we
+ // reach a significant character or the end of input.
+ for {
+ if ch.char == -1 {
+ break // End of Input
+ }
+ if ch.char == chPound && c.eolComments {
+ // Start of a comment. Consume the rest of it, until EOF or a new line
+ for {
+ ch.char = c.nextCharLL()
+ if ch.char == -1 || // EOF
+ ch.char == chCR ||
+ ch.char == chLF ||
+ ch.char == chNEL ||
+ ch.char == chLS {
+ break
+ }
+ }
+ }
+ // TODO: check what Java & Perl do with non-ASCII white spaces. Ticket 6061.
+ if !pattern.IsWhitespace(ch.char) {
+ break
+ }
+ ch.char = c.nextCharLL()
+ }
+ }
+
+ //
+ // check for backslash escaped characters.
+ //
+ if ch.char == chBackSlash {
+ beforeEscape := c.p
+ if staticSetUnescape.ContainsRune(c.peekCharLL()) {
+ //
+ // A '\' sequence that is handled by ICU's standard unescapeAt function.
+ // Includes \uxxxx, \n, \r, many others.
+ // Return the single equivalent character.
+ //
+ c.nextCharLL() // get & discard the peeked char.
+ ch.quoted = true
+
+ ch.char, c.p = pattern.UnescapeAtRunes(beforeEscape)
+ if ch.char < 0 {
+ c.error(BadEscapeSequence)
+ }
+ c.charNum += len(beforeEscape) - len(c.p)
+ } else if c.peekCharLL() == chDigit0 {
+ // Octal Escape, using Java Regexp Conventions
+ // which are \0 followed by 1-3 octal digits.
+ // Different from ICU Unescape handling of Octal, which does not
+ // require the leading 0.
+ // Java also has the convention of only consuming 2 octal digits if
+ // the three digit number would be > 0xff
+ //
+ ch.char = 0
+ c.nextCharLL() // Consume the initial 0.
+ for index := 0; index < 3; index++ {
+ ch2 := c.peekCharLL()
+ if ch2 < chDigit0 || ch2 > chDigit7 {
+ if index == 0 {
+ // \0 is not followed by any octal digits.
+ c.error(BadEscapeSequence)
+ }
+ break
+ }
+ ch.char <<= 3
+ ch.char += ch2 & 7
+ if ch.char <= 255 {
+ c.nextCharLL()
+ } else {
+ // The last digit made the number too big. Forget we saw it.
+ ch.char >>= 3
+ }
+ }
+ ch.quoted = true
+ } else if c.peekCharLL() == chQ {
+ // "\Q" enter quote mode, which will continue until "\E"
+ c.quoteMode = true
+ c.nextCharLL() // discard the 'Q'.
+ c.nextChar(ch) // recurse to get the real next char.
+ return
+ } else {
+ // We are in a '\' escape that will be handled by the state table scanner.
+ // Just return the backslash, but remember that the following char is to
+ // be taken literally.
+ c.inBackslashQuote = true
+ }
+ }
+ }
+
+ // re-enable # to end-of-line comments, in case they were disabled.
+ // They are disabled by the parser upon seeing '(?', but this lasts for
+ // the fetching of the next character only.
+ c.eolComments = true
+}
+
+const (
+ chCR = 0x0d // New lines, for terminating comments.
+ chLF = 0x0a // Line Feed
+ chPound = 0x23 // '#', introduces a comment.
+ chDigit0 = 0x30 // '0'
+ chDigit7 = 0x37 // '7'
+ chColon = 0x3A // ':'
+ chE = 0x45 // 'E'
+ chQ = 0x51 // 'Q'
+ chN = 0x4E // 'N'
+ chP = 0x50 // 'P'
+ chBackSlash = 0x5c // '\' introduces a char escape
+ chLBracket = 0x5b // '['
+ chRBracket = 0x5d // ']'
+ chUp = 0x5e // '^'
+ chLowerP = 0x70
+ chLBrace = 0x7b // '{'
+ chRBrace = 0x7d // '}'
+ chNEL = 0x85 // NEL newline variant
+ chLS = 0x2028 // Unicode Line Separator
+ chAmp = 0x26 // '&'
+ chDash = 0x2d // '-'
+)
+
+func (c *compiler) compile(pat []rune) error {
+ if c.err != nil {
+ return c.err
+ }
+ if c.out.pattern != "" {
+ panic("cannot reuse pattern")
+ }
+
+ c.out.pattern = string(pat)
+ c.p = pat
+
+ var state uint16 = 1
+ var table []regexTableEl
+
+ // UREGEX_LITERAL force entire pattern to be treated as a literal string.
+ if c.modeFlags&Literal != 0 {
+ c.quoteMode = true
+ }
+
+ c.nextChar(&c.c)
+
+ // Main loop for the regex pattern parsing state machine.
+ // Runs once per state transition.
+ // Each time through optionally performs, depending on the state table,
+ // - an advance to the next pattern char
+ // - an action to be performed.
+ // - pushing or popping a state to/from the local state return stack.
+ // file regexcst.txt is the source for the state table. The logic behind
+ // recognizing the pattern syntax is there, not here.
+ for {
+ if c.err != nil {
+ break
+ }
+
+ if state == 0 {
+ panic("bad state?")
+ }
+
+ table = parseStateTable[state:]
+ for len(table) > 0 {
+ if table[0].charClass < 127 && !c.c.quoted && rune(table[0].charClass) == c.c.char {
+ break
+ }
+ if table[0].charClass == 255 {
+ break
+ }
+ if table[0].charClass == 254 && c.c.quoted {
+ break
+ }
+ if table[0].charClass == 253 && c.c.char == -1 {
+ break
+ }
+ if table[0].charClass >= 128 && table[0].charClass < 240 && !c.c.quoted && c.c.char != -1 {
+ if staticRuleSet[table[0].charClass-128].ContainsRune(c.c.char) {
+ break
+ }
+ }
+
+ table = table[1:]
+ }
+
+ if !c.doParseActions(table[0].action) {
+ break
+ }
+
+ if table[0].pushState != 0 {
+ c.stackPtr++
+ if c.stackPtr >= stackSize {
+ c.error(InternalError)
+ c.stackPtr--
+ }
+ c.stack[c.stackPtr] = uint16(table[0].pushState)
+ }
+
+ if table[0].nextChar {
+ c.nextChar(&c.c)
+ }
+
+ if table[0].nextState != 255 {
+ state = uint16(table[0].nextState)
+ } else {
+ state = c.stack[c.stackPtr]
+ c.stackPtr--
+ if c.stackPtr < 0 {
+ c.stackPtr++
+ c.error(MismatchedParen)
+ }
+ }
+ }
+
+ if c.err != nil {
+ return c.err
+ }
+
+ c.allocateStackData(restackframeHdrCount)
+ c.stripNOPs()
+
+ c.out.minMatchLen = c.minMatchLength(3, len(c.out.compiledPat)-1)
+
+ c.matchStartType()
+ return c.err
+}
+
+func (c *compiler) doParseActions(action patternParseAction) bool {
+ switch action {
+ case doPatStart:
+ // Start of pattern compiles to:
+ //0 SAVE 2 Fall back to position of FAIL
+ //1 jmp 3
+ //2 FAIL Stop if we ever reach here.
+ //3 NOP Dummy, so start of pattern looks the same as
+ // the start of an ( grouping.
+ //4 NOP Reserved, will be replaced by a save if there are
+ // OR | operators at the top level
+ c.appendOp(urxStateSave, 2)
+ c.appendOp(urxJmp, 3)
+ c.appendOp(urxFail, 0)
+
+ // Standard open nonCapture paren action emits the two NOPs and
+ // sets up the paren stack frame.
+ c.doParseActions(doOpenNonCaptureParen)
+
+ case doPatFinish:
+ // We've scanned to the end of the pattern
+ // The end of pattern compiles to:
+ // URX_END
+ // which will stop the runtime match engine.
+ // Encountering end of pattern also behaves like a close paren,
+ // and forces fixups of the State Save at the beginning of the compiled pattern
+ // and of any OR operations at the top level.
+ //
+ c.handleCloseParen()
+ if len(c.parenStack) > 0 {
+ // Missing close paren in pattern.
+ c.error(MismatchedParen)
+ }
+
+ // add the END operation to the compiled pattern.
+ c.appendOp(urxEnd, 0)
+
+ // Terminate the pattern compilation state machine.
+ return false
+
+ case doOrOperator:
+ // Scanning a '|', as in (A|B)
+ // Generate code for any pending literals preceding the '|'
+ c.fixLiterals(false)
+
+ // Insert a SAVE operation at the start of the pattern section preceding
+ // this OR at this level. This SAVE will branch the match forward
+ // to the right hand side of the OR in the event that the left hand
+ // side fails to match and backtracks. Locate the position for the
+ // save from the location on the top of the parentheses stack.
+ var savePosition int
+ savePosition, c.parenStack = stackPop(c.parenStack)
+ op := c.out.compiledPat[savePosition]
+
+ if op.typ() != urxNop {
+ panic("expected a NOP placeholder")
+ }
+
+ op = c.buildOp(urxStateSave, len(c.out.compiledPat)+1)
+ c.out.compiledPat[savePosition] = op
+
+ // Append an JMP operation into the compiled pattern. The operand for
+ // the JMP will eventually be the location following the ')' for the
+ // group. This will be patched in later, when the ')' is encountered.
+ c.appendOp(urxJmp, 0)
+
+ // Push the position of the newly added JMP op onto the parentheses stack.
+ // This registers it for fixup when this block's close paren is encountered.
+ c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1)
+
+ // Append a NOP to the compiled pattern. This is the slot reserved
+ // for a SAVE in the event that there is yet another '|' following
+ // this one.
+ c.appendOp(urxNop, 0)
+ c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1)
+
+ case doBeginNamedCapture:
+ // Scanning (?
+ // Compile to a
+ // - NOP, which later may be replaced if the parenthesized group
+ // has a quantifier, followed by
+ // - STO_SP save state stack position, so it can be restored at the ")"
+ // - NOP, which may later be replaced by a save-state if there
+ // is an '|' alternation within the parens.
+ c.fixLiterals(false)
+ c.appendOp(urxNop, 0)
+ varLoc := c.allocateData(1) // Reserve a data location for saving the state stack ptr.
+ c.appendOp(urxStoSp, varLoc)
+ c.appendOp(urxNop, 0)
+
+ // On the Parentheses stack, start a new frame and add the positions
+ // of the two NOPs. Depending on what follows in the pattern, the
+ // NOPs may be changed to SAVE_STATE or JMP ops, with a target
+ // address of the end of the parenthesized group.
+ c.parenStack = append(c.parenStack, int(c.modeFlags))
+ c.parenStack = append(c.parenStack, parenAtomic)
+ c.parenStack = append(c.parenStack, len(c.out.compiledPat)-3)
+ c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1)
+
+ case doOpenLookAhead:
+ // Positive Look-ahead (?= stuff )
+ //
+ // Note: Addition of transparent input regions, with the need to
+ // restore the original regions when failing out of a lookahead
+ // block, complicated this sequence. Some combined opcodes
+ // might make sense - or might not, lookahead aren't that common.
+ //
+ // Caution: min match length optimization knows about this
+ // sequence; don't change without making updates there too.
+ //
+ // Compiles to
+ // 1 LA_START dataLoc Saves SP, Input Pos, Active input region.
+ // 2. STATE_SAVE 4 on failure of lookahead, goto 4
+ // 3 JMP 6 continue ...
+ //
+ // 4. LA_END Look Ahead failed. Restore regions.
+ // 5. BACKTRACK and back track again.
+ //
+ // 6. NOP reserved for use by quantifiers on the block.
+ // Look-ahead can't have quantifiers, but paren stack
+ // compile time conventions require the slot anyhow.
+ // 7. NOP may be replaced if there is are '|' ops in the block.
+ // 8. code for parenthesized stuff.
+ // 9. LA_END
+ //
+ // Four data slots are reserved, for saving state on entry to the look-around
+ // 0: stack pointer on entry.
+ // 1: input position on entry.
+ // 2: fActiveStart, the active bounds start on entry.
+ // 3: fActiveLimit, the active bounds limit on entry.
+ c.fixLiterals(false)
+ dataLoc := c.allocateData(4)
+ c.appendOp(urxLaStart, dataLoc)
+ c.appendOp(urxStateSave, len(c.out.compiledPat)+2)
+ c.appendOp(urxJmp, len(c.out.compiledPat)+3)
+ c.appendOp(urxLaEnd, dataLoc)
+ c.appendOp(urxBacktrack, 0)
+ c.appendOp(urxNop, 0)
+ c.appendOp(urxNop, 0)
+
+ // On the Parentheses stack, start a new frame and add the positions
+ // of the NOPs.
+ c.parenStack = append(c.parenStack, int(c.modeFlags))
+ c.parenStack = append(c.parenStack, parenLookahead)
+ c.parenStack = append(c.parenStack, len(c.out.compiledPat)-2)
+ c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1)
+
+ case doOpenLookAheadNeg:
+ // Negated Lookahead. (?! stuff )
+ // Compiles to
+ // 1. LA_START dataloc
+ // 2. SAVE_STATE 7 // Fail within look-ahead block restores to this state,
+ // // which continues with the match.
+ // 3. NOP // Std. Open Paren sequence, for possible '|'
+ // 4. code for parenthesized stuff.
+ // 5. LA_END // Cut back stack, remove saved state from step 2.
+ // 6. BACKTRACK // code in block succeeded, so neg. lookahead fails.
+ // 7. END_LA // Restore match region, in case look-ahead was using
+ // an alternate (transparent) region.
+ // Four data slots are reserved, for saving state on entry to the look-around
+ // 0: stack pointer on entry.
+ // 1: input position on entry.
+ // 2: fActiveStart, the active bounds start on entry.
+ // 3: fActiveLimit, the active bounds limit on entry.
+ c.fixLiterals(false)
+ dataLoc := c.allocateData(4)
+ c.appendOp(urxLaStart, dataLoc)
+ c.appendOp(urxStateSave, 0) // dest address will be patched later.
+ c.appendOp(urxNop, 0)
+
+ // On the Parentheses stack, start a new frame and add the positions
+ // of the StateSave and NOP.
+ c.parenStack = append(c.parenStack, int(c.modeFlags))
+ c.parenStack = append(c.parenStack, parenNegLookahead)
+ c.parenStack = append(c.parenStack, len(c.out.compiledPat)-2)
+ c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1)
+
+ // Instructions #5 - #7 will be added when the ')' is encountered.
+
+ case doOpenLookBehind:
+ // Compile a (?<= look-behind open paren.
+ //
+ // Compiles to
+ // 0 URX_LB_START dataLoc
+ // 1 URX_LB_CONT dataLoc
+ // 2 MinMatchLen
+ // 3 MaxMatchLen
+ // 4 URX_NOP Standard '(' boilerplate.
+ // 5 URX_NOP Reserved slot for use with '|' ops within (block).
+ // 6
+ // 7 URX_LB_END dataLoc # Check match len, restore input len
+ // 8 URX_LA_END dataLoc # Restore stack, input pos
+ //
+ // Allocate a block of matcher data, to contain (when running a match)
+ // 0: Stack ptr on entry
+ // 1: Input Index on entry
+ // 2: fActiveStart, the active bounds start on entry.
+ // 3: fActiveLimit, the active bounds limit on entry.
+ // 4: Start index of match current match attempt.
+ // The first four items must match the layout of data for LA_START / LA_END
+
+ // Generate match code for any pending literals.
+ c.fixLiterals(false)
+
+ // Allocate data space
+ dataLoc := c.allocateData(5)
+
+ // Emit URX_LB_START
+ c.appendOp(urxLbStart, dataLoc)
+
+ // Emit URX_LB_CONT
+ c.appendOp(urxLbCont, dataLoc)
+ c.appendOp(urxReservedOp, 0) // MinMatchLength. To be filled later.
+ c.appendOp(urxReservedOp, 0) // MaxMatchLength. To be filled later.
+
+ // Emit the NOPs
+ c.appendOp(urxNop, 0)
+ c.appendOp(urxNop, 0)
+
+ // On the Parentheses stack, start a new frame and add the positions
+ // of the URX_LB_CONT and the NOP.
+ c.parenStack = append(c.parenStack, int(c.modeFlags))
+ c.parenStack = append(c.parenStack, parenLookBehind)
+ c.parenStack = append(c.parenStack, len(c.out.compiledPat)-2)
+ c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1)
+
+ // The final two instructions will be added when the ')' is encountered.
+
+ case doOpenLookBehindNeg:
+ // Compile a (?
+ // 8 URX_LBN_END dataLoc # Check match len, cause a FAIL
+ // 9 ...
+ //
+ // Allocate a block of matcher data, to contain (when running a match)
+ // 0: Stack ptr on entry
+ // 1: Input Index on entry
+ // 2: fActiveStart, the active bounds start on entry.
+ // 3: fActiveLimit, the active bounds limit on entry.
+ // 4: Start index of match current match attempt.
+ // The first four items must match the layout of data for LA_START / LA_END
+
+ // Generate match code for any pending literals.
+ c.fixLiterals(false)
+
+ // Allocate data space
+ dataLoc := c.allocateData(5)
+
+ // Emit URX_LB_START
+ c.appendOp(urxLbStart, dataLoc)
+
+ // Emit URX_LBN_CONT
+ c.appendOp(urxLbnCount, dataLoc)
+ c.appendOp(urxReservedOp, 0) // MinMatchLength. To be filled later.
+ c.appendOp(urxReservedOp, 0) // MaxMatchLength. To be filled later.
+ c.appendOp(urxReservedOp, 0) // Continue Loc. To be filled later.
+
+ // Emit the NOPs
+ c.appendOp(urxNop, 0)
+ c.appendOp(urxNop, 0)
+
+ // On the Parentheses stack, start a new frame and add the positions
+ // of the URX_LB_CONT and the NOP.
+ c.parenStack = append(c.parenStack, int(c.modeFlags))
+ c.parenStack = append(c.parenStack, parenLookBehindN)
+ c.parenStack = append(c.parenStack, len(c.out.compiledPat)-2)
+ c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1)
+
+ // The final two instructions will be added when the ')' is encountered.
+
+ case doConditionalExpr, doPerlInline:
+ // Conditionals such as (?(1)a:b)
+ // Perl inline-conditionals. (?{perl code}a|b) We're not perl, no way to do them.
+ c.error(Unimplemented)
+
+ case doCloseParen:
+ c.handleCloseParen()
+ if len(c.parenStack) == 0 {
+ // Extra close paren, or missing open paren.
+ c.error(MismatchedParen)
+ }
+
+ case doNOP:
+
+ case doBadOpenParenType, doRuleError:
+ c.error(RuleSyntax)
+
+ case doMismatchedParenErr:
+ c.error(MismatchedParen)
+
+ case doPlus:
+ // Normal '+' compiles to
+ // 1. stuff to be repeated (already built)
+ // 2. jmp-sav 1
+ // 3. ...
+ //
+ // Or, if the item to be repeated can match a zero length string,
+ // 1. STO_INP_LOC data-loc
+ // 2. body of stuff to be repeated
+ // 3. JMP_SAV_X 2
+ // 4. ...
+
+ //
+ // Or, if the item to be repeated is simple
+ // 1. Item to be repeated.
+ // 2. LOOP_SR_I set number (assuming repeated item is a set ref)
+ // 3. LOOP_C stack location
+ topLoc := c.blockTopLoc(false) // location of item #1
+
+ // Check for simple constructs, which may get special optimized code.
+ if topLoc == len(c.out.compiledPat)-1 {
+ repeatedOp := c.out.compiledPat[topLoc]
+
+ if repeatedOp.typ() == urxSetref {
+ // Emit optimized code for [char set]+
+ c.appendOp(urxLoopSrI, repeatedOp.value())
+ frameLoc := c.allocateStackData(1)
+ c.appendOp(urxLoopC, frameLoc)
+ break
+ }
+
+ if repeatedOp.typ() == urxDotany || repeatedOp.typ() == urxDotanyAll || repeatedOp.typ() == urxDotanyUnix {
+ // Emit Optimized code for .+ operations.
+ loopOpI := c.buildOp(urxLoopDotI, 0)
+ if repeatedOp.typ() == urxDotanyAll {
+ // URX_LOOP_DOT_I operand is a flag indicating ". matches any" mode.
+ loopOpI |= 1
+ }
+ if c.modeFlags&UnixLines != 0 {
+ loopOpI |= 2
+ }
+ c.appendIns(loopOpI)
+ frameLoc := c.allocateStackData(1)
+ c.appendOp(urxLoopC, frameLoc)
+ break
+ }
+ }
+
+ // General case.
+
+ // Check for minimum match length of zero, which requires
+ // extra loop-breaking code.
+ if c.minMatchLength(topLoc, len(c.out.compiledPat)-1) == 0 {
+ // Zero length match is possible.
+ // Emit the code sequence that can handle it.
+ c.insertOp(topLoc)
+ frameLoc := c.allocateStackData(1)
+ op := c.buildOp(urxStoInpLoc, frameLoc)
+ c.out.compiledPat[topLoc] = op
+
+ c.appendOp(urxJmpSavX, topLoc+1)
+ } else {
+ // Simpler code when the repeated body must match something non-empty
+ c.appendOp(urxJmpSav, topLoc)
+ }
+
+ case doNGPlus:
+ // Non-greedy '+?' compiles to
+ // 1. stuff to be repeated (already built)
+ // 2. state-save 1
+ // 3. ...
+ topLoc := c.blockTopLoc(false)
+ c.appendOp(urxStateSave, topLoc)
+
+ case doOpt:
+ // Normal (greedy) ? quantifier.
+ // Compiles to
+ // 1. state save 3
+ // 2. body of optional block
+ // 3. ...
+ // Insert the state save into the compiled pattern, and we're done.
+ saveStateLoc := c.blockTopLoc(true)
+ saveStateOp := c.buildOp(urxStateSave, len(c.out.compiledPat))
+ c.out.compiledPat[saveStateLoc] = saveStateOp
+
+ case doNGOpt:
+ // Non-greedy ?? quantifier
+ // compiles to
+ // 1. jmp 4
+ // 2. body of optional block
+ // 3 jmp 5
+ // 4. state save 2
+ // 5 ...
+ // This code is less than ideal, with two jmps instead of one, because we can only
+ // insert one instruction at the top of the block being iterated.
+ jmp1Loc := c.blockTopLoc(true)
+ jmp2Loc := len(c.out.compiledPat)
+
+ jmp1Op := c.buildOp(urxJmp, jmp2Loc+1)
+ c.out.compiledPat[jmp1Loc] = jmp1Op
+
+ c.appendOp(urxJmp, jmp2Loc+2)
+ c.appendOp(urxStateSave, jmp1Loc+1)
+
+ case doStar:
+ // Normal (greedy) * quantifier.
+ // Compiles to
+ // 1. STATE_SAVE 4
+ // 2. body of stuff being iterated over
+ // 3. JMP_SAV 2
+ // 4. ...
+ //
+ // Or, if the body is a simple [Set],
+ // 1. LOOP_SR_I set number
+ // 2. LOOP_C stack location
+ // ...
+ //
+ // Or if this is a .*
+ // 1. LOOP_DOT_I (. matches all mode flag)
+ // 2. LOOP_C stack location
+ //
+ // Or, if the body can match a zero-length string, to inhibit infinite loops,
+ // 1. STATE_SAVE 5
+ // 2. STO_INP_LOC data-loc
+ // 3. body of stuff
+ // 4. JMP_SAV_X 2
+ // 5. ...
+ // location of item #1, the STATE_SAVE
+ topLoc := c.blockTopLoc(false)
+
+ // Check for simple *, where the construct being repeated
+ // compiled to single opcode, and might be optimizable.
+ if topLoc == len(c.out.compiledPat)-1 {
+ repeatedOp := c.out.compiledPat[topLoc]
+
+ if repeatedOp.typ() == urxSetref {
+ // Emit optimized code for a [char set]*
+ loopOpI := c.buildOp(urxLoopSrI, repeatedOp.value())
+ c.out.compiledPat[topLoc] = loopOpI
+ dataLoc := c.allocateStackData(1)
+ c.appendOp(urxLoopC, dataLoc)
+ break
+ }
+
+ if repeatedOp.typ() == urxDotany || repeatedOp.typ() == urxDotanyAll || repeatedOp.typ() == urxDotanyUnix {
+ // Emit Optimized code for .* operations.
+ loopOpI := c.buildOp(urxLoopDotI, 0)
+ if repeatedOp.typ() == urxDotanyAll {
+ // URX_LOOP_DOT_I operand is a flag indicating . matches any mode.
+ loopOpI |= 1
+ }
+ if (c.modeFlags & UnixLines) != 0 {
+ loopOpI |= 2
+ }
+ c.out.compiledPat[topLoc] = loopOpI
+ dataLoc := c.allocateStackData(1)
+ c.appendOp(urxLoopC, dataLoc)
+ break
+ }
+ }
+
+ // Emit general case code for this *
+ // The optimizations did not apply.
+
+ saveStateLoc := c.blockTopLoc(true)
+ jmpOp := c.buildOp(urxJmpSav, saveStateLoc+1)
+
+ // Check for minimum match length of zero, which requires
+ // extra loop-breaking code.
+ if c.minMatchLength(saveStateLoc, len(c.out.compiledPat)-1) == 0 {
+ c.insertOp(saveStateLoc)
+ dataLoc := c.allocateStackData(1)
+
+ op := c.buildOp(urxStoInpLoc, dataLoc)
+ c.out.compiledPat[saveStateLoc+1] = op
+ jmpOp = c.buildOp(urxJmpSavX, saveStateLoc+2)
+ }
+
+ // Locate the position in the compiled pattern where the match will continue
+ // after completing the *. (4 or 5 in the comment above)
+ continueLoc := len(c.out.compiledPat) + 1
+
+ // Put together the save state op and store it into the compiled code.
+ saveStateOp := c.buildOp(urxStateSave, continueLoc)
+ c.out.compiledPat[saveStateLoc] = saveStateOp
+
+ // Append the URX_JMP_SAV or URX_JMPX operation to the compiled pattern.
+ c.appendIns(jmpOp)
+
+ case doNGStar:
+ // Non-greedy *? quantifier
+ // compiles to
+ // 1. JMP 3
+ // 2. body of stuff being iterated over
+ // 3. STATE_SAVE 2
+ // 4 ...
+ jmpLoc := c.blockTopLoc(true) // loc 1.
+ saveLoc := len(c.out.compiledPat) // loc 3.
+ jmpOp := c.buildOp(urxJmp, saveLoc)
+ c.out.compiledPat[jmpLoc] = jmpOp
+ c.appendOp(urxStateSave, jmpLoc+1)
+
+ case doIntervalInit:
+ // The '{' opening an interval quantifier was just scanned.
+ // Init the counter variables that will accumulate the values as the digits
+ // are scanned.
+ c.intervalLow = 0
+ c.intervalUpper = -1
+
+ case doIntevalLowerDigit:
+ // Scanned a digit from the lower value of an {lower,upper} interval
+ digitValue := uCharDigitValue(c.c.char)
+ val := int64(c.intervalLow)*10 + digitValue
+ if val > math.MaxInt32 {
+ c.error(NumberTooBig)
+ } else {
+ c.intervalLow = int(val)
+ }
+
+ case doIntervalUpperDigit:
+ // Scanned a digit from the upper value of an {lower,upper} interval
+ if c.intervalUpper < 0 {
+ c.intervalUpper = 0
+ }
+ digitValue := uCharDigitValue(c.c.char)
+ val := int64(c.intervalUpper)*10 + digitValue
+ if val > math.MaxInt32 {
+ c.error(NumberTooBig)
+ } else {
+ c.intervalUpper = int(val)
+ }
+
+ case doIntervalSame:
+ // Scanned a single value interval like {27}. Upper = Lower.
+ c.intervalUpper = c.intervalLow
+
+ case doInterval:
+ // Finished scanning a normal {lower,upper} interval. Generate the code for it.
+ if !c.compileInlineInterval() {
+ c.compileInterval(urxCtrInit, utxCtrLoop)
+ }
+
+ case doPossessiveInterval:
+ // Finished scanning a Possessive {lower,upper}+ interval. Generate the code for it.
+
+ // Remember the loc for the top of the block being looped over.
+ // (Can not reserve a slot in the compiled pattern at this time, because
+ // compileInterval needs to reserve also, and blockTopLoc can only reserve
+ // once per block.)
+ topLoc := c.blockTopLoc(false)
+
+ // Produce normal looping code.
+ c.compileInterval(urxCtrInit, utxCtrLoop)
+
+ // Surround the just-emitted normal looping code with a STO_SP ... LD_SP
+ // just as if the loop was enclosed in atomic parentheses.
+
+ // First the STO_SP before the start of the loop
+ c.insertOp(topLoc)
+
+ varLoc := c.allocateData(1) // Reserve a data location for saving the
+ op := c.buildOp(urxStoSp, varLoc)
+ c.out.compiledPat[topLoc] = op
+
+ var loopOp instruction
+ loopOp, c.out.compiledPat = stackPop(c.out.compiledPat)
+ if loopOp.typ() != utxCtrLoop || loopOp.value() != topLoc {
+ panic("bad instruction at the end of compiled pattern")
+ }
+
+ loopOp++ // point LoopOp after the just-inserted STO_SP
+ c.appendIns(loopOp)
+
+ // Then the LD_SP after the end of the loop
+ c.appendOp(urxLdSp, varLoc)
+
+ case doNGInterval:
+ // Finished scanning a non-greedy {lower,upper}? interval. Generate the code for it.
+ c.compileInterval(urxCtrInitNg, urxCtrLoopNg)
+
+ case doIntervalError:
+ c.error(BadInterval)
+
+ case doLiteralChar:
+ // We've just scanned a "normal" character from the pattern,
+ c.literalChar(c.c.char)
+
+ case doEscapedLiteralChar:
+ // We've just scanned an backslashed escaped character with no
+ // special meaning. It represents itself.
+ if (c.modeFlags&ErrorOnUnknownEscapes) != 0 && ((c.c.char >= 0x41 && c.c.char <= 0x5A) || /* in [A-Z] */ (c.c.char >= 0x61 && c.c.char <= 0x7a)) { // in [a-z]
+ c.error(BadEscapeSequence)
+ }
+ c.literalChar(c.c.char)
+
+ case doDotAny:
+ // scanned a ".", match any single character.
+ c.fixLiterals(false)
+ if (c.modeFlags & DotAll) != 0 {
+ c.appendOp(urxDotanyAll, 0)
+ } else if (c.modeFlags & UnixLines) != 0 {
+ c.appendOp(urxDotanyUnix, 0)
+ } else {
+ c.appendOp(urxDotany, 0)
+ }
+
+ case doCaret:
+ c.fixLiterals(false)
+ if (c.modeFlags&Multiline) == 0 && (c.modeFlags&UnixLines) == 0 {
+ c.appendOp(urxCaret, 0)
+ } else if (c.modeFlags&Multiline) != 0 && (c.modeFlags&UnixLines) == 0 {
+ c.appendOp(urxCaretM, 0)
+ } else if (c.modeFlags&Multiline) == 0 && (c.modeFlags&UnixLines) != 0 {
+ c.appendOp(urxCaret, 0) // Only testing true start of input.
+ } else if (c.modeFlags&Multiline) != 0 && (c.modeFlags&UnixLines) != 0 {
+ c.appendOp(urxCaretMUnix, 0)
+ }
+
+ case doDollar:
+ c.fixLiterals(false)
+ if (c.modeFlags&Multiline) == 0 && (c.modeFlags&UnixLines) == 0 {
+ c.appendOp(urxDollar, 0)
+ } else if (c.modeFlags&Multiline) != 0 && (c.modeFlags&UnixLines) == 0 {
+ c.appendOp(urxDollarM, 0)
+ } else if (c.modeFlags&Multiline) == 0 && (c.modeFlags&UnixLines) != 0 {
+ c.appendOp(urxDollarD, 0)
+ } else if (c.modeFlags&Multiline) != 0 && (c.modeFlags&UnixLines) != 0 {
+ c.appendOp(urxDollarMd, 0)
+ }
+
+ case doBackslashA:
+ c.fixLiterals(false)
+ c.appendOp(urxCaret, 0)
+
+ case doBackslashB:
+ if !BreakIteration {
+ if (c.modeFlags & UWord) != 0 {
+ c.error(Unimplemented)
+ }
+ }
+ c.fixLiterals(false)
+ if c.modeFlags&UWord != 0 {
+ c.appendOp(urxBackslashBu, 1)
+ } else {
+ c.appendOp(urxBackslashB, 1)
+ }
+
+ case doBackslashb:
+ if !BreakIteration {
+ if (c.modeFlags & UWord) != 0 {
+ c.error(Unimplemented)
+ }
+ }
+ c.fixLiterals(false)
+ if c.modeFlags&UWord != 0 {
+ c.appendOp(urxBackslashBu, 0)
+ } else {
+ c.appendOp(urxBackslashB, 0)
+ }
+
+ case doBackslashD:
+ c.fixLiterals(false)
+ c.appendOp(urxBackslashD, 1)
+
+ case doBackslashd:
+ c.fixLiterals(false)
+ c.appendOp(urxBackslashD, 0)
+
+ case doBackslashG:
+ c.fixLiterals(false)
+ c.appendOp(urxBackslashG, 0)
+
+ case doBackslashH:
+ c.fixLiterals(false)
+ c.appendOp(urxBackslashH, 1)
+
+ case doBackslashh:
+ c.fixLiterals(false)
+ c.appendOp(urxBackslashH, 0)
+
+ case doBackslashR:
+ c.fixLiterals(false)
+ c.appendOp(urxBackslashR, 0)
+
+ case doBackslashS:
+ c.fixLiterals(false)
+ c.appendOp(urxStatSetrefN, urxIsspaceSet)
+
+ case doBackslashs:
+ c.fixLiterals(false)
+ c.appendOp(urxStaticSetref, urxIsspaceSet)
+
+ case doBackslashV:
+ c.fixLiterals(false)
+ c.appendOp(urxBackslashV, 1)
+
+ case doBackslashv:
+ c.fixLiterals(false)
+ c.appendOp(urxBackslashV, 0)
+
+ case doBackslashW:
+ c.fixLiterals(false)
+ c.appendOp(urxStatSetrefN, urxIswordSet)
+
+ case doBackslashw:
+ c.fixLiterals(false)
+ c.appendOp(urxStaticSetref, urxIswordSet)
+
+ case doBackslashX:
+ if !BreakIteration {
+ // Grapheme Cluster Boundary requires ICU break iteration.
+ c.error(Unimplemented)
+ }
+ c.fixLiterals(false)
+ c.appendOp(urxBackslashX, 0)
+
+ case doBackslashZ:
+ c.fixLiterals(false)
+ c.appendOp(urxDollar, 0)
+
+ case doBackslashz:
+ c.fixLiterals(false)
+ c.appendOp(urxBackslashZ, 0)
+
+ case doEscapeError:
+ c.error(BadEscapeSequence)
+
+ case doExit:
+ c.fixLiterals(false)
+ return false
+
+ case doProperty:
+ c.fixLiterals(false)
+ theSet := c.scanProp()
+ c.compileSet(theSet)
+
+ case doNamedChar:
+ ch := c.scanNamedChar()
+ c.literalChar(ch)
+
+ case doBackRef:
+ // BackReference. Somewhat unusual in that the front-end can not completely parse
+ // the regular expression, because the number of digits to be consumed
+ // depends on the number of capture groups that have been defined. So
+ // we have to do it here instead.
+ numCaptureGroups := len(c.out.groupMap)
+ groupNum := int64(0)
+ ch := c.c.char
+
+ for {
+ // Loop once per digit, for max allowed number of digits in a back reference.
+ digit := uCharDigitValue(ch)
+ groupNum = groupNum*10 + digit
+ if groupNum >= int64(numCaptureGroups) {
+ break
+ }
+ ch = c.peekCharLL()
+ if !staticRuleSet[ruleSetDigitChar-128].ContainsRune(ch) {
+ break
+ }
+ c.nextCharLL()
+ }
+
+ // Scan of the back reference in the source regexp is complete. Now generate
+ // the compiled code for it.
+ // Because capture groups can be forward-referenced by back-references,
+ // we fill the operand with the capture group number. At the end
+ // of compilation, it will be changed to the variable's location.
+ if groupNum == 0 {
+ panic("\\0 begins an octal escape sequence, and shouldn't enter this code path at all")
+ }
+ c.fixLiterals(false)
+ if (c.modeFlags & CaseInsensitive) != 0 {
+ c.appendOp(urxBackrefI, int(groupNum))
+ } else {
+ c.appendOp(urxBackref, int(groupNum))
+ }
+
+ case doBeginNamedBackRef:
+ if c.captureName != nil {
+ panic("should not replace capture name")
+ }
+ c.captureName = &strings.Builder{}
+
+ case doContinueNamedBackRef:
+ c.captureName.WriteRune(c.c.char)
+
+ case doCompleteNamedBackRef:
+ {
+ groupNumber := c.out.namedCaptureMap[c.captureName.String()]
+ if groupNumber == 0 {
+ // Group name has not been defined.
+ // Could be a forward reference. If we choose to support them at some
+ // future time, extra mechanism will be required at this point.
+ c.error(InvalidCaptureGroupName)
+ } else {
+ // Given the number, handle identically to a \n numbered back reference.
+ // See comments above, under doBackRef
+ c.fixLiterals(false)
+ if (c.modeFlags & CaseInsensitive) != 0 {
+ c.appendOp(urxBackrefI, groupNumber)
+ } else {
+ c.appendOp(urxBackref, groupNumber)
+ }
+ }
+ c.captureName = nil
+ }
+
+ case doPossessivePlus:
+ // Possessive ++ quantifier.
+ // Compiles to
+ // 1. STO_SP
+ // 2. body of stuff being iterated over
+ // 3. STATE_SAVE 5
+ // 4. JMP 2
+ // 5. LD_SP
+ // 6. ...
+ //
+ // Note: TODO: This is pretty inefficient. A mass of saved state is built up
+ // then unconditionally discarded. Perhaps introduce a new opcode. Ticket 6056
+ //
+ // Emit the STO_SP
+ topLoc := c.blockTopLoc(true)
+ stoLoc := c.allocateData(1) // Reserve the data location for storing save stack ptr.
+ op := c.buildOp(urxStoSp, stoLoc)
+ c.out.compiledPat[topLoc] = op
+
+ // Emit the STATE_SAVE
+ c.appendOp(urxStateSave, len(c.out.compiledPat)+2)
+
+ // Emit the JMP
+ c.appendOp(urxJmp, topLoc+1)
+
+ // Emit the LD_SP
+ c.appendOp(urxLdSp, stoLoc)
+
+ case doPossessiveStar:
+ // Possessive *+ quantifier.
+ // Compiles to
+ // 1. STO_SP loc
+ // 2. STATE_SAVE 5
+ // 3. body of stuff being iterated over
+ // 4. JMP 2
+ // 5. LD_SP loc
+ // 6 ...
+ // TODO: do something to cut back the state stack each time through the loop.
+ // Reserve two slots at the top of the block.
+ topLoc := c.blockTopLoc(true)
+ c.insertOp(topLoc)
+
+ // emit STO_SP loc
+ stoLoc := c.allocateData(1) // Reserve the data location for storing save stack ptr.
+ op := c.buildOp(urxStoSp, stoLoc)
+ c.out.compiledPat[topLoc] = op
+
+ // Emit the SAVE_STATE 5
+ L7 := len(c.out.compiledPat) + 1
+ op = c.buildOp(urxStateSave, L7)
+ c.out.compiledPat[topLoc+1] = op
+
+ // Append the JMP operation.
+ c.appendOp(urxJmp, topLoc+1)
+
+ // Emit the LD_SP loc
+ c.appendOp(urxLdSp, stoLoc)
+
+ case doPossessiveOpt:
+ // Possessive ?+ quantifier.
+ // Compiles to
+ // 1. STO_SP loc
+ // 2. SAVE_STATE 5
+ // 3. body of optional block
+ // 4. LD_SP loc
+ // 5. ...
+ //
+ // Reserve two slots at the top of the block.
+ topLoc := c.blockTopLoc(true)
+ c.insertOp(topLoc)
+
+ // Emit the STO_SP
+ stoLoc := c.allocateData(1) // Reserve the data location for storing save stack ptr.
+ op := c.buildOp(urxStoSp, stoLoc)
+ c.out.compiledPat[topLoc] = op
+
+ // Emit the SAVE_STATE
+ continueLoc := len(c.out.compiledPat) + 1
+ op = c.buildOp(urxStateSave, continueLoc)
+ c.out.compiledPat[topLoc+1] = op
+
+ // Emit the LD_SP
+ c.appendOp(urxLdSp, stoLoc)
+
+ case doBeginMatchMode:
+ c.newModeFlags = c.modeFlags
+ c.setModeFlag = true
+ case doMatchMode: // (?i) and similar
+ var bit RegexpFlag
+ switch c.c.char {
+ case 0x69: /* 'i' */
+ bit = CaseInsensitive
+ case 0x64: /* 'd' */
+ bit = UnixLines
+ case 0x6d: /* 'm' */
+ bit = Multiline
+ case 0x73: /* 's' */
+ bit = DotAll
+ case 0x75: /* 'u' */
+ bit = 0 /* Unicode casing */
+ case 0x77: /* 'w' */
+ bit = UWord
+ case 0x78: /* 'x' */
+ bit = Comments
+ case 0x2d: /* '-' */
+ c.setModeFlag = false
+ default:
+ // Should never happen. Other chars are filtered out by the scanner.
+ panic("unreachable")
+ }
+ if c.setModeFlag {
+ c.newModeFlags |= bit
+ } else {
+ c.newModeFlags &= ^bit
+ }
+
+ case doSetMatchMode:
+ // Emit code to match any pending literals, using the not-yet changed match mode.
+ c.fixLiterals(false)
+
+ // We've got a (?i) or similar. The match mode is being changed, but
+ // the change is not scoped to a parenthesized block.
+ if c.newModeFlags >= 0 {
+ panic("cNewModeFlags not properly initialized")
+ }
+ c.modeFlags = c.newModeFlags
+
+ case doMatchModeParen:
+ // We've got a (?i: or similar. Begin a parenthesized block, save old
+ // mode flags so they can be restored at the close of the block.
+ //
+ // Compile to a
+ // - NOP, which later may be replaced by a save-state if the
+ // parenthesized group gets a * quantifier, followed by
+ // - NOP, which may later be replaced by a save-state if there
+ // is an '|' alternation within the parens.
+ c.fixLiterals(false)
+ c.appendOp(urxNop, 0)
+ c.appendOp(urxNop, 0)
+
+ // On the Parentheses stack, start a new frame and add the postions
+ // of the two NOPs (a normal non-capturing () frame, except for the
+ // saving of the orignal mode flags.)
+ c.parenStack = append(c.parenStack, int(c.modeFlags))
+ c.parenStack = append(c.parenStack, parenFlags)
+ c.parenStack = append(c.parenStack, len(c.out.compiledPat)-2)
+ c.parenStack = append(c.parenStack, len(c.out.compiledPat)-1)
+
+ // Set the current mode flags to the new values.
+ if c.newModeFlags >= 0 {
+ panic("cNewModeFlags not properly initialized")
+ }
+ c.modeFlags = c.newModeFlags
+
+ case doBadModeFlag:
+ c.error(InvalidFlag)
+
+ case doSuppressComments:
+ // We have just scanned a '(?'. We now need to prevent the character scanner from
+ // treating a '#' as a to-the-end-of-line comment.
+ // (This Perl compatibility just gets uglier and uglier to do...)
+ c.eolComments = false
+
+ case doSetAddAmp:
+ set := c.setStack[len(c.setStack)-1]
+ set.AddRune(chAmp)
+
+ case doSetAddDash:
+ set := c.setStack[len(c.setStack)-1]
+ set.AddRune(chDash)
+
+ case doSetBackslashs:
+ set := c.setStack[len(c.setStack)-1]
+ set.AddAll(staticPropertySets[urxIsspaceSet])
+
+ case doSetBackslashS:
+ sset := uset.New()
+ sset.AddAll(staticPropertySets[urxIsspaceSet]) // TODO: add latin1 spaces
+ sset.Complement()
+
+ set := c.setStack[len(c.setStack)-1]
+ set.AddAll(sset)
+
+ case doSetBackslashd:
+ set := c.setStack[len(c.setStack)-1]
+ c.err = uprops.AddCategory(set, uchar.GcNdMask)
+
+ case doSetBackslashD:
+ digits := uset.New()
+ c.err = uprops.ApplyIntPropertyValue(digits, uprops.UCharGeneralCategoryMask, int32(uchar.GcNdMask))
+ digits.Complement()
+ set := c.setStack[len(c.setStack)-1]
+ set.AddAll(digits)
+
+ case doSetBackslashh:
+ h := uset.New()
+ c.err = uprops.ApplyIntPropertyValue(h, uprops.UCharGeneralCategoryMask, int32(uchar.GcZsMask))
+ h.AddRune(9) // Tab
+
+ set := c.setStack[len(c.setStack)-1]
+ set.AddAll(h)
+
+ case doSetBackslashH:
+ h := uset.New()
+ c.err = uprops.ApplyIntPropertyValue(h, uprops.UCharGeneralCategoryMask, int32(uchar.GcZsMask))
+ h.AddRune(9) // Tab
+ h.Complement()
+
+ set := c.setStack[len(c.setStack)-1]
+ set.AddAll(h)
+
+ case doSetBackslashv:
+ set := c.setStack[len(c.setStack)-1]
+ set.AddRuneRange(0x0a, 0x0d) // add range
+ set.AddRune(0x85)
+ set.AddRuneRange(0x2028, 0x2029)
+
+ case doSetBackslashV:
+ v := uset.New()
+ v.AddRuneRange(0x0a, 0x0d) // add range
+ v.AddRune(0x85)
+ v.AddRuneRange(0x2028, 0x2029)
+ v.Complement()
+
+ set := c.setStack[len(c.setStack)-1]
+ set.AddAll(v)
+
+ case doSetBackslashw:
+ set := c.setStack[len(c.setStack)-1]
+ set.AddAll(staticPropertySets[urxIswordSet])
+
+ case doSetBackslashW:
+ sset := uset.New()
+ sset.AddAll(staticPropertySets[urxIswordSet])
+ sset.Complement()
+
+ set := c.setStack[len(c.setStack)-1]
+ set.AddAll(sset)
+
+ case doSetBegin:
+ c.fixLiterals(false)
+ c.setStack = append(c.setStack, uset.New())
+ c.setOpStack = append(c.setOpStack, setStart)
+ if (c.modeFlags & CaseInsensitive) != 0 {
+ c.setOpStack = append(c.setOpStack, setCaseClose)
+ }
+
+ case doSetBeginDifference1:
+ // We have scanned something like [[abc]-[
+ // Set up a new UnicodeSet for the set beginning with the just-scanned '['
+ // Push a Difference operator, which will cause the new set to be subtracted from what
+ // went before once it is created.
+ c.setPushOp(setDifference1)
+ c.setOpStack = append(c.setOpStack, setStart)
+ if (c.modeFlags & CaseInsensitive) != 0 {
+ c.setOpStack = append(c.setOpStack, setCaseClose)
+ }
+
+ case doSetBeginIntersection1:
+ // We have scanned something like [[abc]&[
+ // Need both the '&' operator and the open '[' operator.
+ c.setPushOp(setIntersection1)
+ c.setOpStack = append(c.setOpStack, setStart)
+ if (c.modeFlags & CaseInsensitive) != 0 {
+ c.setOpStack = append(c.setOpStack, setCaseClose)
+ }
+
+ case doSetBeginUnion:
+ // We have scanned something like [[abc][
+ // Need to handle the union operation explicitly [[abc] | [
+ c.setPushOp(setUnion)
+ c.setOpStack = append(c.setOpStack, setStart)
+ if (c.modeFlags & CaseInsensitive) != 0 {
+ c.setOpStack = append(c.setOpStack, setCaseClose)
+ }
+
+ case doSetDifference2:
+ // We have scanned something like [abc--
+ // Consider this to unambiguously be a set difference operator.
+ c.setPushOp(setDifference2)
+
+ case doSetEnd:
+ // Have encountered the ']' that closes a set.
+ // Force the evaluation of any pending operations within this set,
+ // leave the completed set on the top of the set stack.
+ c.setEval(setEnd)
+ var start setOperation
+ start, c.setOpStack = stackPop(c.setOpStack)
+ if start != setStart {
+ panic("bad set operation in stack")
+ }
+
+ case doSetFinish:
+ // Finished a complete set expression, including all nested sets.
+ // The close bracket has already triggered clearing out pending set operators,
+ // the operator stack should be empty and the operand stack should have just
+ // one entry, the result set.
+ if len(c.setOpStack) > 0 {
+ panic("expected setOpStack to be empty")
+ }
+ var set *uset.UnicodeSet
+ set, c.setStack = stackPop(c.setStack)
+ c.compileSet(set)
+
+ case doSetIntersection2:
+ // Have scanned something like [abc&&
+ c.setPushOp(setIntersection2)
+
+ case doSetLiteral:
+ // Union the just-scanned literal character into the set being built.
+ // This operation is the highest precedence set operation, so we can always do
+ // it immediately, without waiting to see what follows. It is necessary to perform
+ // any pending '-' or '&' operation first, because these have the same precedence
+ // as union-ing in a literal'
+ c.setEval(setUnion)
+ set := c.setStack[len(c.setStack)-1]
+ set.AddRune(c.c.char)
+ c.lastSetLiteral = c.c.char
+
+ case doSetLiteralEscaped:
+ // A back-slash escaped literal character was encountered.
+ // Processing is the same as with setLiteral, above, with the addition of
+ // the optional check for errors on escaped ASCII letters.
+ if (c.modeFlags&ErrorOnUnknownEscapes) != 0 &&
+ ((c.c.char >= 0x41 && c.c.char <= 0x5A) || // in [A-Z]
+ (c.c.char >= 0x61 && c.c.char <= 0x7a)) { // in [a-z]
+ c.error(BadEscapeSequence)
+ }
+ c.setEval(setUnion)
+ set := c.setStack[len(c.setStack)-1]
+ set.AddRune(c.c.char)
+ c.lastSetLiteral = c.c.char
+
+ case doSetNamedChar:
+ // Scanning a \N{UNICODE CHARACTER NAME}
+ // Aside from the source of the character, the processing is identical to doSetLiteral,
+ // above.
+ ch := c.scanNamedChar()
+ c.setEval(setUnion)
+ set := c.setStack[len(c.setStack)-1]
+ set.AddRune(ch)
+ c.lastSetLiteral = ch
+
+ case doSetNamedRange:
+ // We have scanned literal-\N{CHAR NAME}. Add the range to the set.
+ // The left character is already in the set, and is saved in fLastSetLiteral.
+ // The right side needs to be picked up, the scan is at the 'N'.
+ // Lower Limit > Upper limit being an error matches both Java
+ // and ICU UnicodeSet behavior.
+ ch := c.scanNamedChar()
+ if c.err == nil && (c.lastSetLiteral == -1 || c.lastSetLiteral > ch) {
+ c.error(InvalidRange)
+ }
+ set := c.setStack[len(c.setStack)-1]
+ set.AddRuneRange(c.lastSetLiteral, ch)
+ c.lastSetLiteral = ch
+
+ case doSetNegate:
+ // Scanned a '^' at the start of a set.
+ // Push the negation operator onto the set op stack.
+ // A twist for case-insensitive matching:
+ // the case closure operation must happen _before_ negation.
+ // But the case closure operation will already be on the stack if it's required.
+ // This requires checking for case closure, and swapping the stack order
+ // if it is present.
+ tosOp := c.setOpStack[len(c.setOpStack)-1]
+ if tosOp == setCaseClose {
+ _, c.setOpStack = stackPop(c.setOpStack)
+ c.setOpStack = append(c.setOpStack, setNegation)
+ c.setOpStack = append(c.setOpStack, setCaseClose)
+ } else {
+ c.setOpStack = append(c.setOpStack, setNegation)
+ }
+
+ case doSetNoCloseError:
+ c.error(MissingCloseBracket)
+
+ case doSetOpError:
+ c.error(RuleSyntax) // -- or && at the end of a set. Illegal.
+
+ case doSetPosixProp:
+ if set := c.scanPosixProp(); set != nil {
+ c.setStack[len(c.setStack)-1].AddAll(set)
+ }
+
+ case doSetProp:
+ // Scanned a \p \P within [brackets].
+ if set := c.scanProp(); set != nil {
+ c.setStack[len(c.setStack)-1].AddAll(set)
+ }
+
+ case doSetRange:
+ // We have scanned literal-literal. Add the range to the set.
+ // The left character is already in the set, and is saved in fLastSetLiteral.
+ // The right side is the current character.
+ // Lower Limit > Upper limit being an error matches both Java
+ // and ICU UnicodeSet behavior.
+
+ if c.lastSetLiteral == -1 || c.lastSetLiteral > c.c.char {
+ c.error(InvalidRange)
+ }
+ c.setStack[len(c.setStack)-1].AddRuneRange(c.lastSetLiteral, c.c.char)
+
+ default:
+ panic("unexpected OP in parser")
+ }
+
+ return c.err == nil
+}
+
+// uCharDigitValue returns the decimal value of an ASCII digit rune
+// ('0'..'9'), or -1 when the rune is not an ASCII digit.
+func uCharDigitValue(char rune) int64 {
+	if char < '0' || char > '9' {
+		return -1
+	}
+	return int64(char - '0')
+}
+
+// stackPop removes and returns the top element of a slice-based stack,
+// along with the shortened stack. Popping an empty stack returns the zero
+// value of T and leaves the stack empty.
+func stackPop[T any](stack []T) (T, []T) {
+	var top T
+	if n := len(stack); n > 0 {
+		top, stack = stack[n-1], stack[:n-1]
+	}
+	return top, stack
+}
+
+// error records a compilation failure on the compiler, capturing the error
+// code together with the scanner's current line, character offset, and the
+// pattern being compiled. The stored error unconditionally replaces any
+// previously recorded one, matching the original behavior.
+func (c *compiler) error(e CompileErrorCode) {
+	compileErr := &CompileError{
+		Code:    e,
+		Line:    c.lineNum,
+		Offset:  c.charNum,
+		Context: c.out.pattern,
+	}
+	c.err = compileErr
+}
+
+// stripNOPs removes all NOP instructions from the compiled pattern,
+// compacting the remaining code in place and patching every operand that
+// refers to a code location so it addresses the instruction's shifted
+// position. Numbered back-reference operands are also resolved here, from
+// a 1-based capture-group number to the group's frame location.
+func (c *compiler) stripNOPs() {
+	// Nothing to do once a compile error has been recorded.
+	if c.err != nil {
+		return
+	}
+
+	end := len(c.out.compiledPat)
+	deltas := make([]int, 0, end)
+
+	// Make a first pass over the code, computing the amount that things
+	// will be offset at each location in the original code.
+	var loc, d int
+	for loc = 0; loc < end; loc++ {
+		deltas = append(deltas, d)
+		op := c.out.compiledPat[loc]
+		if op.typ() == urxNop {
+			d++
+		}
+	}
+
+	// Make a second pass over the code, removing the NOPs by moving following
+	// code up, and patching operands that refer to code locations that
+	// are being moved. The array of offsets from the first step is used
+	// to compute the new operand values.
+	var src, dst int
+	for src = 0; src < end; src++ {
+		op := c.out.compiledPat[src]
+		opType := op.typ()
+
+		switch opType {
+		case urxNop:
+			// skip
+
+		case urxStateSave,
+			urxJmp,
+			utxCtrLoop,
+			urxCtrLoopNg,
+			urxRelocOprnd,
+			urxJmpx,
+			urxJmpSav,
+			urxJmpSavX:
+			// These are instructions with operands that refer to code locations.
+			operandAddress := op.value()
+			fixedOperandAddress := operandAddress - deltas[operandAddress]
+			op = c.buildOp(opType, fixedOperandAddress)
+			c.out.compiledPat[dst] = op
+			dst++
+
+		case urxBackref, urxBackrefI:
+			// NOTE(review): at this point the operand is still a 1-based
+			// capture-group number (\0 is rejected during parsing, so a
+			// value of 0 that would index groupMap[-1] should not occur
+			// here — confirm against the doBackRef scanning path).
+			where := op.value()
+			if where > len(c.out.groupMap) {
+				c.error(InvalidBackRef)
+				break
+			}
+
+			// Rewrite the operand as the capture group's frame location.
+			where = int(c.out.groupMap[where-1])
+			op = c.buildOp(opType, where)
+			c.out.compiledPat[dst] = op
+			dst++
+			c.out.needsAltInput = true
+
+		case urxReservedOp,
+			urxReservedOpN,
+			urxBacktrack,
+			urxEnd,
+			urxOnechar,
+			urxString,
+			urxStringLen,
+			urxStartCapture,
+			urxEndCapture,
+			urxStaticSetref,
+			urxStatSetrefN,
+			urxSetref,
+			urxDotany,
+			urxFail,
+			urxBackslashB,
+			urxBackslashBu,
+			urxBackslashG,
+			urxBackslashX,
+			urxBackslashZ,
+			urxDotanyAll,
+			urxBackslashD,
+			urxCaret,
+			urxDollar,
+			urxCtrInit,
+			urxCtrInitNg,
+			urxDotanyUnix,
+			urxStoSp,
+			urxLdSp,
+			urxStoInpLoc,
+			urxLaStart,
+			urxLaEnd,
+			urcOnecharI,
+			urxStringI,
+			urxDollarM,
+			urxCaretM,
+			urxCaretMUnix,
+			urxLbStart,
+			urxLbCont,
+			urxLbEnd,
+			urxLbnCount,
+			urxLbnEnd,
+			urxLoopSrI,
+			urxLoopDotI,
+			urxLoopC,
+			urxDollarD,
+			urxDollarMd,
+			urxBackslashH,
+			urxBackslashR,
+			urxBackslashV:
+			// These instructions are unaltered by the relocation.
+			c.out.compiledPat[dst] = op
+			dst++
+
+		default:
+			// Some op is unaccounted for.
+			panic("unreachable")
+		}
+	}
+
+	// Truncate the pattern to its compacted length.
+	c.out.compiledPat = c.out.compiledPat[:dst]
+}
+
+// matchStartType scans the compiled pattern to determine how a match can
+// begin, filling in c.out.startType and, depending on the outcome,
+// c.out.initialChars, c.out.initialChar, c.out.initialStringIdx and
+// c.out.initialStringLen. The match engine uses this information to
+// quickly locate candidate match start positions in the input text.
+func (c *compiler) matchStartType() {
+	var loc int               // Location in the pattern of the current op being processed.
+	var currentLen int32      // Minimum length of a match to this point (loc) in the pattern
+	var numInitialStrings int // Number of strings encountered that could match at start.
+	var atStart = true        // True if no part of the pattern yet encountered
+	// could have advanced the position in a match.
+	// (Maximum match length so far == 0)
+
+	// forwardedLength is a vector holding minimum-match-length values that
+	// are propagated forward in the pattern by JMP or STATE_SAVE operations.
+	// It must be one longer than the pattern being checked because some ops
+	// will jmp to a end-of-block+1 location from within a block, and we must
+	// count those when checking the block.
+	end := len(c.out.compiledPat)
+	forwardedLength := make([]int32, end+1)
+
+	// NOTE(review): scanning starts at loc 3, presumably skipping fixed
+	// header ops emitted at the start of every compiled pattern — confirm
+	// against the compiler's prologue emission.
+	for loc = 3; loc < end; loc++ {
+		forwardedLength[loc] = math.MaxInt32
+	}
+
+	for loc = 3; loc < end; loc++ {
+		op := c.out.compiledPat[loc]
+		opType := op.typ()
+
+		// The loop is advancing linearly through the pattern.
+		// If the op we are now at was the destination of a branch in the pattern,
+		// and that path has a shorter minimum length than the current accumulated value,
+		// replace the current accumulated value.
+		if forwardedLength[loc] < currentLen {
+			currentLen = forwardedLength[loc]
+		}
+
+		switch opType {
+		// Ops that don't change the total length matched
+		case urxReservedOp,
+			urxEnd,
+			urxFail,
+			urxStringLen,
+			urxNop,
+			urxStartCapture,
+			urxEndCapture,
+			urxBackslashB,
+			urxBackslashBu,
+			urxBackslashG,
+			urxBackslashZ,
+			urxDollar,
+			urxDollarM,
+			urxDollarD,
+			urxDollarMd,
+			urxRelocOprnd,
+			urxStoInpLoc,
+			urxBackref, // BackRef. Must assume that it might be a zero length match
+			urxBackrefI,
+			urxStoSp, // Setup for atomic or possessive blocks. Doesn't change what can match.
+			urxLdSp:
+			// skip
+
+		case urxCaret:
+			if atStart {
+				c.out.startType = startStart
+			}
+
+		case urxCaretM, urxCaretMUnix:
+			if atStart {
+				c.out.startType = startLine
+			}
+
+		case urxOnechar:
+			if currentLen == 0 {
+				// This character could appear at the start of a match.
+				// Add it to the set of possible starting characters.
+				c.out.initialChars.AddRune(op.value32())
+				numInitialStrings += 2
+			}
+			currentLen = safeIncrement(currentLen, 1)
+			atStart = false
+
+		case urxSetref:
+			if currentLen == 0 {
+				sn := op.value()
+				set := c.out.sets[sn]
+				c.out.initialChars.AddAll(set)
+				numInitialStrings += 2
+			}
+			currentLen = safeIncrement(currentLen, 1)
+			atStart = false
+
+		case urxLoopSrI:
+			// [Set]*, like a SETREF, above, in what it can match,
+			// but may not match at all, so currentLen is not incremented.
+			if currentLen == 0 {
+				sn := op.value()
+				set := c.out.sets[sn]
+				c.out.initialChars.AddAll(set)
+				numInitialStrings += 2
+			}
+			atStart = false
+
+		case urxLoopDotI:
+			if currentLen == 0 {
+				// .* at the start of a pattern.
+				// Any character can begin the match.
+				c.out.initialChars.Clear()
+				c.out.initialChars.Complement()
+				numInitialStrings += 2
+			}
+			atStart = false
+
+		case urxStaticSetref:
+			if currentLen == 0 {
+				sn := op.value()
+				c.out.initialChars.AddAll(staticPropertySets[sn])
+				numInitialStrings += 2
+			}
+			currentLen = safeIncrement(currentLen, 1)
+			atStart = false
+
+		case urxStatSetrefN:
+			// Negated static set reference: starting chars are the complement.
+			if currentLen == 0 {
+				sn := op.value()
+				sc := uset.New()
+				sc.AddAll(staticPropertySets[sn])
+				sc.Complement()
+
+				c.out.initialChars.AddAll(sc)
+				numInitialStrings += 2
+			}
+			currentLen = safeIncrement(currentLen, 1)
+			atStart = false
+
+		case urxBackslashD:
+			// Digit Char
+			if currentLen == 0 {
+				s := uset.New()
+				c.err = uprops.ApplyIntPropertyValue(s, uprops.UCharGeneralCategoryMask, int32(uchar.GcNdMask))
+				if op.value() != 0 {
+					s.Complement()
+				}
+				c.out.initialChars.AddAll(s)
+				numInitialStrings += 2
+			}
+			currentLen = safeIncrement(currentLen, 1)
+			atStart = false
+
+		case urxBackslashH:
+			// Horiz white space
+			if currentLen == 0 {
+				s := uset.New()
+				c.err = uprops.ApplyIntPropertyValue(s, uprops.UCharGeneralCategoryMask, int32(uchar.GcZsMask))
+				s.AddRune(9) // Tab
+				if op.value() != 0 {
+					s.Complement()
+				}
+				c.out.initialChars.AddAll(s)
+				numInitialStrings += 2
+			}
+			currentLen = safeIncrement(currentLen, 1)
+			atStart = false
+
+		case urxBackslashR, // Any line ending sequence
+			urxBackslashV: // Any line ending code point, with optional negation
+			if currentLen == 0 {
+				s := uset.New()
+				s.AddRuneRange(0x0a, 0x0d) // add range
+				s.AddRune(0x85)
+				s.AddRuneRange(0x2028, 0x2029)
+				if op.value() != 0 {
+					// Complement option applies to URX_BACKSLASH_V only.
+					s.Complement()
+				}
+				c.out.initialChars.AddAll(s)
+				numInitialStrings += 2
+			}
+			currentLen = safeIncrement(currentLen, 1)
+			atStart = false
+
+		case urcOnecharI:
+			// Case Insensitive Single Character.
+			if currentLen == 0 {
+				ch := op.value32()
+				if uprops.HasBinaryProperty(ch, uprops.UCharCaseSensitive) {
+					starters := uset.New()
+					starters.AddRuneRange(ch, ch)
+					starters.CloseOver(uset.CaseInsensitive)
+					// findCaseInsensitiveStarters(c, &starters);
+					// For ONECHAR_I, no need to worry about text chars that expand on folding into
+					// strings. The expanded folding can't match the pattern.
+					c.out.initialChars.AddAll(starters)
+				} else {
+					// Char has no case variants. Just add it as-is to the
+					// set of possible starting chars.
+					c.out.initialChars.AddRune(ch)
+				}
+				numInitialStrings += 2
+			}
+			currentLen = safeIncrement(currentLen, 1)
+			atStart = false
+
+		case urxBackslashX, // Grapheme Cluster. Minimum is 1, max unbounded.
+			urxDotanyAll, // . matches one or two.
+			urxDotany,
+			urxDotanyUnix:
+			if currentLen == 0 {
+				// These constructs are all bad news when they appear at the start
+				// of a match. Any character can begin the match.
+				c.out.initialChars.Clear()
+				c.out.initialChars.Complement()
+				numInitialStrings += 2
+			}
+			currentLen = safeIncrement(currentLen, 1)
+			atStart = false
+
+		case urxJmpx:
+			loc++ // Except for extra operand on URX_JMPX, same as URX_JMP.
+			fallthrough
+
+		case urxJmp:
+			jmpDest := op.value()
+			if jmpDest < loc {
+				// Loop of some kind. Can safely ignore, the worst that will happen
+				// is that we understate the true minimum length
+				currentLen = forwardedLength[loc+1]
+			} else {
+				// Forward jump. Propagate the current min length to the target loc of the jump.
+				if forwardedLength[jmpDest] > currentLen {
+					forwardedLength[jmpDest] = currentLen
+				}
+			}
+			atStart = false
+
+		case urxJmpSav,
+			urxJmpSavX:
+			// Combo of state save to the next loc, + jmp backwards.
+			// Net effect on min. length computation is nothing.
+			atStart = false
+
+		case urxBacktrack:
+			// Fails are kind of like a branch, except that the min length was
+			// propagated already, by the state save.
+			currentLen = forwardedLength[loc+1]
+			atStart = false
+
+		case urxStateSave:
+			// State Save, for forward jumps, propagate the current minimum.
+			// of the state save.
+			jmpDest := op.value()
+			if jmpDest > loc {
+				if currentLen < forwardedLength[jmpDest] {
+					forwardedLength[jmpDest] = (currentLen)
+				}
+			}
+			atStart = false
+
+		case urxString:
+			loc++
+			stringLenOp := c.out.compiledPat[loc]
+			stringLen := stringLenOp.value()
+			if currentLen == 0 {
+				// Add the starting character of this string to the set of possible starting
+				// characters for this pattern.
+				stringStartIdx := op.value()
+				ch := c.out.literalText[stringStartIdx]
+				c.out.initialChars.AddRune(ch)
+
+				// Remember this string. After the entire pattern has been checked,
+				// if nothing else is identified that can start a match, we'll use it.
+				numInitialStrings++
+				c.out.initialStringIdx = stringStartIdx
+				c.out.initialStringLen = stringLen
+			}
+
+			currentLen = safeIncrement(currentLen, stringLen)
+			atStart = false
+
+		case urxStringI:
+			// Case-insensitive string. Unlike exact-match strings, we won't
+			// attempt a string search for possible match positions. But we
+			// do update the set of possible starting characters.
+			loc++
+			stringLenOp := c.out.compiledPat[loc]
+			stringLen := stringLenOp.value()
+			if currentLen == 0 {
+				// Add the starting character of this string to the set of possible starting
+				// characters for this pattern.
+				stringStartIdx := op.value()
+				ch := c.out.literalText[stringStartIdx]
+				s := uset.New()
+				c.findCaseInsensitiveStarters(ch, s)
+				c.out.initialChars.AddAll(s)
+				numInitialStrings += 2 // Matching on an initial string not possible.
+			}
+			currentLen = safeIncrement(currentLen, stringLen)
+			atStart = false
+
+		case urxCtrInit,
+			urxCtrInitNg:
+			// Loop Init Ops. These don't change the min length, but they are 4 word ops
+			// so location must be updated accordingly.
+			// Loop Init Ops.
+			// If the min loop count == 0
+			//      move loc forwards to the end of the loop, skipping over the body.
+			// If the min count is > 0,
+			//      continue normal processing of the body of the loop.
+			loopEndLoc := c.out.compiledPat[loc+1].value()
+			minLoopCount := int(c.out.compiledPat[loc+2])
+			if minLoopCount == 0 {
+				// Min Loop Count of 0, treat like a forward branch and
+				// move the current minimum length up to the target
+				// (end of loop) location.
+				if forwardedLength[loopEndLoc] > currentLen {
+					forwardedLength[loopEndLoc] = currentLen
+				}
+			}
+			loc += 3 // Skips over operands of CTR_INIT
+			atStart = false
+
+		case utxCtrLoop,
+			urxCtrLoopNg:
+			// Loop ops.
+			// The jump is conditional, backwards only.
+			atStart = false
+
+		case urxLoopC:
+			// More loop ops. These state-save to themselves.
+			// don't change the minimum match
+			atStart = false
+
+		case urxLaStart,
+			urxLbStart:
+			// Look-around. Scan forward until the matching look-ahead end,
+			// without processing the look-around block. This is overly pessimistic.
+
+			// Keep track of the nesting depth of look-around blocks. Boilerplate code for
+			// lookahead contains two LA_END instructions, so count goes up by two
+			// for each LA_START.
+			var depth int
+			if opType == urxLaStart {
+				depth = 2
+			} else {
+				depth = 1
+			}
+			for {
+				loc++
+				op = c.out.compiledPat[loc]
+				if op.typ() == urxLaStart {
+					depth += 2
+				}
+				if op.typ() == urxLbStart {
+					depth++
+				}
+				if op.typ() == urxLaEnd || op.typ() == urxLbnEnd {
+					depth--
+					if depth == 0 {
+						break
+					}
+				}
+				if op.typ() == urxStateSave {
+					// Need this because neg lookahead blocks will FAIL to outside
+					// of the block.
+					jmpDest := op.value()
+					if jmpDest > loc {
+						if currentLen < forwardedLength[jmpDest] {
+							forwardedLength[jmpDest] = (currentLen)
+						}
+					}
+				}
+			}
+
+		case urxLaEnd,
+			urxLbCont,
+			urxLbEnd,
+			urxLbnCount,
+			urxLbnEnd:
+			panic("should be consumed in URX_LA_START")
+
+		default:
+			panic("unreachable")
+		}
+	}
+
+	// Sort out what we should check for when looking for candidate match start positions.
+	// In order of preference,
+	//     1.   Start of input text buffer.
+	//     2.   A literal string.
+	//     3.   Start of line in multi-line mode.
+	//     4.   A single literal character.
+	//     5.   A character from a set of characters.
+	//
+	if c.out.startType == startStart {
+		// Match only at the start of an input text string.
+		//    start type is already set.  We're done.
+	} else if numInitialStrings == 1 && c.out.minMatchLen > 0 {
+		// Match beginning only with a literal string.
+		ch := c.out.literalText[c.out.initialStringIdx]
+		c.out.startType = startString
+		c.out.initialChar = ch
+	} else if c.out.startType == startLine {
+		// Match at start of line in Multi-Line mode.
+		// Nothing to do here; everything is already set.
+	} else if c.out.minMatchLen == 0 {
+		// Zero length match possible. We could start anywhere.
+		c.out.startType = startNoInfo
+	} else if c.out.initialChars.Len() == 1 {
+		// All matches begin with the same char.
+		c.out.startType = startChar
+		c.out.initialChar = c.out.initialChars.RuneAt(0)
+	} else if !c.out.initialChars.ContainsRuneRange(0, 0x10ffff) && c.out.minMatchLen > 0 {
+		// Matches start with a set of character smaller than the set of all chars.
+		c.out.startType = startSet
+	} else {
+		// Matches can start with anything
+		c.out.startType = startNoInfo
+	}
+}
+
+func (c *compiler) appendOp(typ opcode, arg int) {
+ c.appendIns(c.buildOp(typ, arg))
+}
+
+func (c *compiler) appendIns(ins instruction) {
+ if c.err != nil {
+ return
+ }
+ c.out.compiledPat = append(c.out.compiledPat, ins)
+}
+
+func (c *compiler) buildOp(typ opcode, val int) instruction {
+ if c.err != nil {
+ return 0
+ }
+ if val > 0x00ffffff {
+ panic("bad argument to buildOp")
+ }
+ if val < 0 {
+ if !(typ == urxReservedOpN || typ == urxReservedOp) {
+ panic("bad value to buildOp")
+ }
+ typ = urxReservedOpN
+ }
+ return instruction(int32(typ)<<24 | int32(val))
+}
+
// handleCloseParen finishes compilation of a parenthesized block when the
// matching ')' is scanned.  It unwinds the block's frame from the paren
// stack, patching the branch targets of any pending JMP ops (one per '|'
// alternation) to the location just past the block, restores the match
// mode flags saved at '(', and then emits the block-kind-specific closing
// ops (end-capture, atomic stack restore, look-around end sequences, ...).
func (c *compiler) handleCloseParen() {
	if len(c.parenStack) == 0 {
		// No matching open paren for this ')'.
		c.error(MismatchedParen)
		return
	}

	// Flush any buffered literal chars so they are compiled inside the
	// block being closed, not after it.
	c.fixLiterals(false)

	var patIdx int
	var patOp instruction

	// Pop entries until the (negative) frame marker.  Non-negative
	// entries are pattern locations of JMPs needing their targets set to
	// the end of this block.
	for {
		patIdx, c.parenStack = stackPop(c.parenStack)
		if patIdx < 0 {
			break
		}

		patOp = c.out.compiledPat[patIdx]
		if patOp.value() != 0 {
			panic("branch target for JMP should not be set")
		}
		// OR the current end-of-pattern location into the JMP's operand.
		patOp |= instruction(len(c.out.compiledPat))
		c.out.compiledPat[patIdx] = patOp
		c.matchOpenParen = patIdx
	}

	// Below the frame marker are the mode flags saved at the open paren,
	// stored negated so they cannot be mistaken for a pattern location.
	var modeFlags int
	modeFlags, c.parenStack = stackPop(c.parenStack)
	if modeFlags >= 0 {
		panic("modeFlags in paren stack was not negated")
	}

	c.modeFlags = RegexpFlag(modeFlags)

	// The frame marker (patIdx, a negative constant) identifies the kind
	// of parenthesized block being closed.
	switch patIdx {
	case parenPlain, parenFlags:
		// No additional fixups required.
		// (Grouping-only parentheses)
	case parenCapturing:
		// Capturing Parentheses.
		// Insert a End Capture op into the pattern.
		// The frame offset of the variables for this cg is obtained from the
		// start capture op and put it into the end-capture op.

		captureOp := c.out.compiledPat[c.matchOpenParen+1]
		if captureOp.typ() != urxStartCapture {
			panic("bad type in capture op (expected URX_START_CAPTURE)")
		}
		frameVarLocation := captureOp.value()
		c.appendOp(urxEndCapture, frameVarLocation)

	case parenAtomic:
		// Atomic Parenthesis.
		// Insert a LD_SP operation to restore the state stack to the position
		// it was when the atomic parens were entered.
		stoOp := c.out.compiledPat[c.matchOpenParen+1]
		if stoOp.typ() != urxStoSp {
			panic("bad type in capture op (expected URX_STO_SP)")
		}
		stoLoc := stoOp.value()
		c.appendOp(urxLdSp, stoLoc)

	case parenLookahead:
		// Positive look-ahead.  The LA_START op sits 5 slots before the
		// open-paren location recorded for this block.
		startOp := c.out.compiledPat[c.matchOpenParen-5]
		if startOp.typ() != urxLaStart {
			panic("bad type in capture op (expected URX_LA_START)")
		}
		dataLoc := startOp.value()
		c.appendOp(urxLaEnd, dataLoc)

	case parenNegLookahead:
		// Negative look-ahead: LA_END / BACKTRACK / LA_END sequence.
		startOp := c.out.compiledPat[c.matchOpenParen-1]
		if startOp.typ() != urxLaStart {
			panic("bad type in capture op (expected URX_LA_START)")
		}
		dataLoc := startOp.value()
		c.appendOp(urxLaEnd, dataLoc)
		c.appendOp(urxBacktrack, 0)
		c.appendOp(urxLaEnd, dataLoc)

		// Patch the URX_SAVE near the top of the block.
		// The destination of the SAVE is the final LA_END that was just added.
		saveOp := c.out.compiledPat[c.matchOpenParen]
		if saveOp.typ() != urxStateSave {
			panic("bad type in capture op (expected URX_STATE_SAVE)")
		}
		saveOp = c.buildOp(urxStateSave, len(c.out.compiledPat)-1)
		c.out.compiledPat[c.matchOpenParen] = saveOp

	case parenLookBehind:
		// Positive look-behind.
		startOp := c.out.compiledPat[c.matchOpenParen-4]
		if startOp.typ() != urxLbStart {
			panic("bad type in capture op (expected URX_LB_START)")
		}
		dataLoc := startOp.value()
		c.appendOp(urxLbEnd, dataLoc)
		c.appendOp(urxLaEnd, dataLoc)

		// Determine the min and max bounds for the length of the
		// string that the pattern can match.
		// An unbounded upper limit is an error.
		patEnd := len(c.out.compiledPat) - 1
		minML := c.minMatchLength(c.matchOpenParen, patEnd)
		maxML := c.maxMatchLength(c.matchOpenParen, patEnd)

		if maxML == math.MaxInt32 {
			c.error(LookBehindLimit)
			break
		}
		if minML == math.MaxInt32 {
			// This condition happens when no match is possible, such as with a
			// [set] expression containing no elements.
			// In principle, the generated code to evaluate the expression could be deleted,
			// but it's probably not worth the complication.
			minML = 0
		}

		// Store the bounds into the operand slots reserved at LB_START.
		c.out.compiledPat[c.matchOpenParen-2] = instruction(minML)
		c.out.compiledPat[c.matchOpenParen-1] = instruction(maxML)

	case parenLookBehindN:
		// Negative look-behind.
		startOp := c.out.compiledPat[c.matchOpenParen-5]
		if startOp.typ() != urxLbStart {
			panic("bad type in capture op (expected URX_LB_START)")
		}
		dataLoc := startOp.value()
		c.appendOp(urxLbnEnd, dataLoc)

		// Determine the min and max bounds for the length of the
		// string that the pattern can match.
		// An unbounded upper limit is an error.
		patEnd := len(c.out.compiledPat) - 1
		minML := c.minMatchLength(c.matchOpenParen, patEnd)
		maxML := c.maxMatchLength(c.matchOpenParen, patEnd)

		// maxML must fit in the 24-bit operand field (no type bits set).
		if instruction(maxML).typ() != 0 {
			c.error(LookBehindLimit)
			break
		}
		if maxML == math.MaxInt32 {
			c.error(LookBehindLimit)
			break
		}
		if minML == math.MaxInt32 {
			// This condition happens when no match is possible, such as with a
			// [set] expression containing no elements.
			// In principle, the generated code to evaluate the expression could be deleted,
			// but it's probably not worth the complication.
			minML = 0
		}

		c.out.compiledPat[c.matchOpenParen-3] = instruction(minML)
		c.out.compiledPat[c.matchOpenParen-2] = instruction(maxML)

		// Relocatable operand: the continue location after the block,
		// adjusted automatically if later inserts shift the code.
		op := c.buildOp(urxRelocOprnd, len(c.out.compiledPat))
		c.out.compiledPat[c.matchOpenParen-1] = op

	default:
		panic("unexpected opcode in parenStack")
	}

	// Remember where this block ended, for quantifiers that follow it.
	c.matchCloseParen = len(c.out.compiledPat)
}
+
// fixLiterals flushes the buffer of scanned literal characters
// (c.literalChars) into the compiled pattern, as a ONECHAR/ONECHAR_I op
// for a single character or a STRING/STRING_I + STRING_LEN pair for
// several.  With split=true the last character is flushed separately so
// that a following quantifier applies only to it, not the whole run.
func (c *compiler) fixLiterals(split bool) {
	if len(c.literalChars) == 0 {
		return
	}

	lastCodePoint := c.literalChars[len(c.literalChars)-1]

	// Split: We need to ensure that the last item in the compiled pattern
	// refers only to the last literal scanned in the pattern, so that
	// quantifiers (*, +, etc.) affect only it, and not a longer string.
	// Split before case folding for case insensitive matches.
	if split {
		c.literalChars = c.literalChars[:len(c.literalChars)-1]
		c.fixLiterals(false)

		// Re-buffer the final character and flush it on its own.
		c.literalChar(lastCodePoint)
		c.fixLiterals(false)
		return
	}

	// Case-fold the whole run up front for case-insensitive mode; folding
	// may change both the contents and the length of the run.
	if c.modeFlags&CaseInsensitive != 0 {
		c.literalChars = ucase.FoldRunes(c.literalChars)
		lastCodePoint = c.literalChars[len(c.literalChars)-1]
	}

	if len(c.literalChars) == 1 {
		// Single char: ONECHAR_I only if the char is actually case-sensitive.
		if c.modeFlags&CaseInsensitive != 0 && uprops.HasBinaryProperty(lastCodePoint, uprops.UCharCaseSensitive) {
			c.appendOp(urcOnecharI, int(lastCodePoint))
		} else {
			c.appendOp(urxOnechar, int(lastCodePoint))
		}
	} else {
		// Multi-char run: the STRING op's operand is the offset of the
		// run within the shared literal text pool.
		if len(c.literalChars) > 0x00ffffff || len(c.out.literalText) > 0x00ffffff {
			c.error(PatternTooBig)
		}
		if c.modeFlags&CaseInsensitive != 0 {
			c.appendOp(urxStringI, len(c.out.literalText))
		} else {
			c.appendOp(urxString, len(c.out.literalText))
		}
		c.appendOp(urxStringLen, len(c.literalChars))
		c.out.literalText = append(c.out.literalText, c.literalChars...)
	}

	// The buffered run has been emitted; reset the buffer.
	c.literalChars = c.literalChars[:0]
}
+
// literalChar buffers one literal code point from the pattern; buffered
// literals are later emitted into the compiled pattern by fixLiterals.
func (c *compiler) literalChar(point rune) {
	c.literalChars = append(c.literalChars, point)
}
+
// allocateData reserves `size` slots in the matcher's persistent data
// area and returns the index of the first reserved slot.  Returns 0 when
// an error has already been recorded, and records InternalError for
// invalid sizes or on overflow of the data area.
func (c *compiler) allocateData(size int) int {
	if c.err != nil {
		return 0
	}
	// Sanity bounds: per-request size is capped at 0x100 slots.
	if size <= 0 || size > 0x100 || c.out.dataSize < 0 {
		c.error(InternalError)
		return 0
	}

	dataIndex := c.out.dataSize
	c.out.dataSize += size
	if c.out.dataSize >= 0x00fffff0 {
		// Data area would no longer fit in a 24-bit operand.
		c.error(InternalError)
	}
	return dataIndex
}
+
// allocateStackData reserves `size` slots in the matcher's backtracking
// stack frame and returns the index of the first reserved slot.  Mirrors
// allocateData but for per-frame (per-match-attempt) storage such as loop
// counters and capture-group bounds.
func (c *compiler) allocateStackData(size int) int {
	if c.err != nil {
		return 0
	}
	// Sanity bounds: per-request size is capped at 0x100 slots.
	if size <= 0 || size > 0x100 || c.out.frameSize < 0 {
		c.error(InternalError)
		return 0
	}
	dataIndex := c.out.frameSize
	c.out.frameSize += size
	if c.out.frameSize >= 0x00fffff0 {
		// Frame would no longer fit in a 24-bit operand.
		c.error(InternalError)
	}
	return dataIndex
}
+
// insertOp inserts a NOP at position `where` in the compiled pattern,
// shifting subsequent code down one slot, then repairs every absolute
// pattern location that pointed past the insertion point: jump-style
// operands in the code, saved locations on the paren stack, and the
// cached open/close paren positions.  The NOP is normally overwritten by
// the caller with a real op (e.g. STATE_SAVE or CTR_INIT).
func (c *compiler) insertOp(where int) {
	if where < 0 || where >= len(c.out.compiledPat) {
		panic("insertOp: out of bounds")
	}

	nop := c.buildOp(urxNop, 0)
	c.out.compiledPat = slices.Insert(c.out.compiledPat, where, nop)

	// Walk through the pattern, looking for any ops with targets that
	// were moved down by the insert. Fix them.
	for loc, op := range c.out.compiledPat {
		switch op.typ() {
		// Only these op types carry an absolute pattern location as operand.
		case urxJmp, urxJmpx, urxStateSave, utxCtrLoop, urxCtrLoopNg, urxJmpSav, urxJmpSavX, urxRelocOprnd:
			if op.value() > where {
				op = c.buildOp(op.typ(), op.value()+1)
				c.out.compiledPat[loc] = op
			}
		}
	}

	// Now fix up the parentheses stack. All positive values in it are locations in
	// the compiled pattern. (Negative values are frame boundaries, and don't need fixing.)
	for loc, x := range c.parenStack {
		if x > where {
			c.parenStack[loc] = x + 1
		}
	}

	if c.matchCloseParen > where {
		c.matchCloseParen++
	}
	if c.matchOpenParen > where {
		c.matchOpenParen++
	}
}
+
// blockTopLoc returns the pattern location of the start of the most
// recently compiled "block" -- either a whole parenthesized expression or
// a single atom (char, set, '.').  This is where a following quantifier
// must place its setup op.  If reserve is true and the block is a single
// atom, a NOP is inserted there as a slot for the caller to overwrite;
// parenthesized blocks already reserved a slot at '('.
func (c *compiler) blockTopLoc(reserve bool) int {
	var loc int
	// Flush literals with split=true so the quantifier binds only to the
	// final literal character, not a whole run.
	c.fixLiterals(true)

	if len(c.out.compiledPat) == c.matchCloseParen {
		// The item just processed is a parenthesized block.
		loc = c.matchOpenParen
	} else {
		// Item just compiled is a single thing, a ".", or a single char, a string or a set reference.
		// No slot for STATE_SAVE was pre-reserved in the compiled code.
		// We need to make space now.
		loc = len(c.out.compiledPat) - 1
		op := c.out.compiledPat[loc]
		if op.typ() == urxStringLen {
			// Strings take two opcode, we want the position of the first one.
			// We can have a string at this point if a single character case-folded to two.
			loc--
		}
		if reserve {
			nop := c.buildOp(urxNop, 0)
			c.out.compiledPat = slices.Insert(c.out.compiledPat, loc, nop)
		}
	}
	return loc
}
+
// compileInlineInterval attempts to compile a bounded {n,m} interval
// quantifier by emitting inline copies of the repeated op instead of a
// counted CTR_INIT/CTR_LOOP loop.  Only applied for small upper bounds
// (<= 10) on single-op operands.  Returns true if the interval was fully
// handled here; false means the caller must fall back to a counted loop.
func (c *compiler) compileInlineInterval() bool {
	if c.intervalUpper > 10 || c.intervalUpper < c.intervalLow {
		return false
	}

	topOfBlock := c.blockTopLoc(false)
	if c.intervalUpper == 0 {
		// Pathological case. Attempt no matches, as if the block doesn't exist.
		// Discard the generated code for the block.
		// If the block included parens, discard the info pertaining to them as well.
		c.out.compiledPat = c.out.compiledPat[:topOfBlock]
		if c.matchOpenParen >= topOfBlock {
			c.matchOpenParen = -1
		}
		if c.matchCloseParen >= topOfBlock {
			c.matchCloseParen = -1
		}
		return true
	}

	if topOfBlock != len(c.out.compiledPat)-1 && c.intervalUpper != 1 {
		// The thing being repeated is not a single op, but some
		// more complex block. Do it as a loop, not inlines.
		// Note that things "repeated" a max of once are handled as inline, because
		// the one copy of the code already generated is just fine.
		return false
	}

	// Pick up the opcode that is to be repeated
	//
	op := c.out.compiledPat[topOfBlock]

	// Compute the pattern location where the inline sequence
	// will end, and set up the state save op that will be needed.
	// (One copy of op per repetition, plus one STATE_SAVE per optional
	// repetition beyond the required minimum.)
	//
	endOfSequenceLoc := len(c.out.compiledPat) - 1 + c.intervalUpper + (c.intervalUpper - c.intervalLow)

	saveOp := c.buildOp(urxStateSave, endOfSequenceLoc)
	if c.intervalLow == 0 {
		// Even the first repetition is optional: guard it with a save.
		c.insertOp(topOfBlock)
		c.out.compiledPat[topOfBlock] = saveOp
	}

	// Loop, emitting the op for the thing being repeated each time.
	// Loop starts at 1 because one instance of the op already exists in the pattern,
	// it was put there when it was originally encountered.
	for i := 1; i < c.intervalUpper; i++ {
		if i >= c.intervalLow {
			// Repetitions beyond the minimum are optional.
			c.appendIns(saveOp)
		}
		c.appendIns(op)
	}
	return true
}
+
// compileInterval compiles a {n,m} interval quantifier as a counted loop:
// a four-slot CTR_INIT block (op, end-of-loop location, min count, max
// count) at the top of the repeated block and a CTR_LOOP at its end.
// init/loop select the greedy or non-greedy opcode variants.
func (c *compiler) compileInterval(init opcode, loop opcode) {
	// The CTR_INIT op at the top of the block with the {n,m} quantifier takes
	// four slots in the compiled code. Reserve them.
	topOfBlock := c.blockTopLoc(true)
	c.insertOp(topOfBlock)
	c.insertOp(topOfBlock)
	c.insertOp(topOfBlock)

	// The operands for the CTR_INIT opcode include the index in the matcher data
	// of the counter. Allocate it now. There are two data items
	//	counterLoc	-->	Loop counter
	//	+1			-->	Input index (for breaking non-progressing loops)
	//					(Only present if unbounded upper limit on loop)
	var dataSize int
	if c.intervalUpper < 0 {
		dataSize = 2
	} else {
		dataSize = 1
	}
	counterLoc := c.allocateStackData(dataSize)

	op := c.buildOp(init, counterLoc)
	c.out.compiledPat[topOfBlock] = op

	// The second operand of CTR_INIT is the location following the end of the loop.
	// Must put in as a URX_RELOC_OPRND so that the value will be adjusted if the
	// compilation of something later on causes the code to grow and the target
	// position to move.
	loopEnd := len(c.out.compiledPat)
	op = c.buildOp(urxRelocOprnd, loopEnd)
	c.out.compiledPat[topOfBlock+1] = op

	// Followed by the min and max counts.
	c.out.compiledPat[topOfBlock+2] = instruction(c.intervalLow)
	c.out.compiledPat[topOfBlock+3] = instruction(c.intervalUpper)

	// Append the CTR_LOOP op. The operand is the location of the CTR_INIT op.
	// Goes at end of the block being looped over, so just append to the code so far.
	c.appendOp(loop, topOfBlock)

	// Counts must fit in the 24-bit operand field (-1 means unbounded).
	if (c.intervalLow&0xff000000) != 0 || (c.intervalUpper > 0 && (c.intervalUpper&0xff000000) != 0) {
		c.error(NumberTooBig)
	}

	if c.intervalLow > c.intervalUpper && c.intervalUpper != -1 {
		c.error(MaxLtMin)
	}
}
+
// scanNamedChar scans a \N{CHAR NAME} construct (the caller has already
// consumed the 'N') and returns the named code point.  On a syntax error
// it records PropertySyntax and returns 0.  Scanning continues with the
// pattern character following the closing '}'.
func (c *compiler) scanNamedChar() rune {
	c.nextChar(&c.c)
	if c.c.char != chLBrace {
		c.error(PropertySyntax)
		return 0
	}

	// Accumulate the name up to (but not including) the closing '}'.
	var charName []rune
	for {
		c.nextChar(&c.c)
		if c.c.char == chRBrace {
			break
		}
		if c.c.char == -1 {
			// End of pattern before '}'.
			c.error(PropertySyntax)
			return 0
		}
		charName = append(charName, c.c.char)
	}

	if !isInvariantUString(charName) {
		// All Unicode character names have only invariant characters.
		// The API to get a character, given a name, accepts only char *, forcing us to convert,
		// which requires this error check
		c.error(PropertySyntax)
		return 0
	}

	theChar := unames.CharForName(unames.UnicodeCharName, string(charName))
	if c.err != nil {
		c.error(PropertySyntax)
	}

	c.nextChar(&c.c) // Continue overall regex pattern processing with char after the '}'
	return theChar
}
+
+func isInvariantUString(name []rune) bool {
+ for _, c := range name {
+ /*
+ * no assertions here because these functions are legitimately called
+ * for strings with variant characters
+ */
+ if !ucharIsInvariant(c) {
+ return false /* found a variant char */
+ }
+ }
+ return true
+}
+
// invariantChars is a bitmap of the "invariant" ASCII characters: one bit
// per code point 0x00..0x7f, packed into four 32-bit words (bit set =
// invariant).
var invariantChars = [...]uint32{
	0xfffffbff, /* 00..1f but not 0a */
	0xffffffe5, /* 20..3f but not 21 23 24 */
	0x87fffffe, /* 40..5f but not 40 5b..5e */
	0x87fffffe, /* 60..7f but not 60 7b..7e */
}

// ucharIsInvariant reports whether c is in the invariant ASCII subset.
// Code points above 0x7f are never invariant.
func ucharIsInvariant(c rune) bool {
	if c > 0x7f {
		return false
	}
	return (invariantChars[c>>5]>>(uint32(c)&0x1f))&1 != 0
}
+
// setPushOp handles a [set] expression operator: it first evaluates any
// pending operators of equal or higher precedence (setEval), then pushes
// the new operator and a fresh empty operand set onto the stacks.
func (c *compiler) setPushOp(op setOperation) {
	c.setEval(op)
	c.setOpStack = append(c.setOpStack, op)
	c.setStack = append(c.setStack, uset.New())
}
+
// setEval applies pending [set] expression operators whose precedence is
// >= that of nextOp.  Precedence is encoded in the high 16 bits of the
// setOperation value.  Unary ops (negation, case closure) modify the top
// set in place; binary ops pop the right operand and fold it into the set
// beneath it, which stays on the stack as the result.
func (c *compiler) setEval(nextOp setOperation) {
	var rightOperand *uset.UnicodeSet
	var leftOperand *uset.UnicodeSet

	for {
		pendingSetOp := c.setOpStack[len(c.setOpStack)-1]
		// Stop once the pending operator binds less tightly than nextOp.
		if (pendingSetOp & 0xffff0000) < (nextOp & 0xffff0000) {
			break
		}

		c.setOpStack = c.setOpStack[:len(c.setOpStack)-1]
		rightOperand = c.setStack[len(c.setStack)-1]

		switch pendingSetOp {
		case setNegation:
			rightOperand.Complement()

		case setCaseClose:
			rightOperand.CloseOver(uset.CaseInsensitive)

		case setDifference1, setDifference2:
			c.setStack = c.setStack[:len(c.setStack)-1]
			leftOperand = c.setStack[len(c.setStack)-1]
			leftOperand.RemoveAll(rightOperand)

		case setIntersection1, setIntersection2:
			c.setStack = c.setStack[:len(c.setStack)-1]
			leftOperand = c.setStack[len(c.setStack)-1]
			leftOperand.RetainAll(rightOperand)

		case setUnion:
			c.setStack = c.setStack[:len(c.setStack)-1]
			leftOperand = c.setStack[len(c.setStack)-1]
			leftOperand.AddAll(rightOperand)

		default:
			panic("unreachable")
		}
	}
}
+
// safeIncrement adds delta to val, saturating at math.MaxInt32 instead of
// overflowing.  MaxInt32 acts as "unbounded" in the match-length analysis,
// so once reached it is sticky.
func safeIncrement(val int32, delta int) int32 {
	// Clamp when delta itself exceeds int32 range, or when the addition
	// would reach or pass MaxInt32.
	if delta > math.MaxInt32 || math.MaxInt32-val <= int32(delta) {
		return math.MaxInt32
	}
	return val + int32(delta)
}
+
// minMatchLength computes a lower bound on the length (in 16-bit code
// units) of input the compiled pattern between locations start and end
// (inclusive) can match.  Understating the true minimum is safe (the
// value is used to prune impossible match-start positions); overstating
// it is not.  math.MaxInt32 means "no match possible on this path".
func (c *compiler) minMatchLength(start, end int) int32 {
	if c.err != nil {
		return 0
	}

	var loc int
	var currentLen int32

	// forwardedLength is a vector holding minimum-match-length values that
	// are propagated forward in the pattern by JMP or STATE_SAVE operations.
	// It must be one longer than the pattern being checked because some ops
	// will jmp to a end-of-block+1 location from within a block, and we must
	// count those when checking the block.
	forwardedLength := make([]int32, end+2)
	for i := range forwardedLength {
		forwardedLength[i] = math.MaxInt32
	}

	for loc = start; loc <= end; loc++ {
		op := c.out.compiledPat[loc]
		opType := op.typ()

		// The loop is advancing linearly through the pattern.
		// If the op we are now at was the destination of a branch in the pattern,
		// and that path has a shorter minimum length than the current accumulated value,
		// replace the current accumulated value.
		// no-match-possible cases.
		if forwardedLength[loc] < currentLen {
			currentLen = forwardedLength[loc]
		}

		switch opType {
		// Ops that don't change the total length matched
		case urxReservedOp,
			urxEnd,
			urxStringLen,
			urxNop,
			urxStartCapture,
			urxEndCapture,
			urxBackslashB,
			urxBackslashBu,
			urxBackslashG,
			urxBackslashZ,
			urxCaret,
			urxDollar,
			urxDollarM,
			urxDollarD,
			urxDollarMd,
			urxRelocOprnd,
			urxStoInpLoc,
			urxCaretM,
			urxCaretMUnix,
			urxBackref, // BackRef. Must assume that it might be a zero length match
			urxBackrefI,
			urxStoSp, // Setup for atomic or possessive blocks. Doesn't change what can match.
			urxLdSp,
			urxJmpSav,
			urxJmpSavX:
			// no-op

		// Ops that match a minimum of one character (one or two 16 bit code units.)
		//
		case urxOnechar,
			urxStaticSetref,
			urxStatSetrefN,
			urxSetref,
			urxBackslashD,
			urxBackslashH,
			urxBackslashR,
			urxBackslashV,
			urcOnecharI,
			urxBackslashX, // Grapheme Cluster. Minimum is 1, max unbounded.
			urxDotanyAll,  // . matches one or two.
			urxDotany,
			urxDotanyUnix:
			currentLen = safeIncrement(currentLen, 1)

		case urxJmpx:
			loc++ // URX_JMPX has an extra operand, ignored here, otherwise processed identically to URX_JMP.
			fallthrough

		case urxJmp:
			jmpDest := op.value()
			if jmpDest < loc {
				// Loop of some kind. Can safely ignore, the worst that will happen
				// is that we understate the true minimum length
				currentLen = forwardedLength[loc+1]
			} else {
				// Forward jump. Propagate the current min length to the target loc of the jump.
				if forwardedLength[jmpDest] > currentLen {
					forwardedLength[jmpDest] = currentLen
				}
			}

		case urxBacktrack:
			// Back-tracks are kind of like a branch, except that the min length was
			// propagated already, by the state save.
			currentLen = forwardedLength[loc+1]

		case urxStateSave:
			// State Save, for forward jumps, propagate the current minimum.
			// of the state save.
			jmpDest := op.value()
			if jmpDest > loc {
				if currentLen < forwardedLength[jmpDest] {
					forwardedLength[jmpDest] = currentLen
				}
			}

		case urxString:
			loc++
			stringLenOp := c.out.compiledPat[loc]
			currentLen = safeIncrement(currentLen, stringLenOp.value())

		case urxStringI:
			loc++
			// TODO: with full case folding, matching input text may be shorter than
			// the string we have here. More smarts could put some bounds on it.
			// Assume a min length of one for now. A min length of zero causes
			// optimization failures for a pattern like "string"+
			// currentLen += URX_VAL(stringLenOp);
			currentLen = safeIncrement(currentLen, 1)

		case urxCtrInit, urxCtrInitNg:
			// Loop Init Ops.
			// If the min loop count == 0
			//	move loc forwards to the end of the loop, skipping over the body.
			// If the min count is > 0,
			//	continue normal processing of the body of the loop.
			loopEndOp := c.out.compiledPat[loc+1]
			loopEndLoc := loopEndOp.value()
			minLoopCount := c.out.compiledPat[loc+2]
			if minLoopCount == 0 {
				loc = loopEndLoc
			} else {
				loc += 3 // Skips over operands of CTR_INIT
			}

		case utxCtrLoop, urxCtrLoopNg:
			// Loop ops. The jump is conditional, backwards only.

		case urxLoopSrI, urxLoopDotI, urxLoopC:
			// More loop ops. These state-save to themselves. don't change the minimum match - could match nothing at all.

		case urxLaStart, urxLbStart:
			// Look-around. Scan forward until the matching look-ahead end,
			// without processing the look-around block. This is overly pessimistic for look-ahead,
			// it assumes that the look-ahead match might be zero-length.
			// TODO: Positive lookahead could recursively do the block, then continue
			//		with the longer of the block or the value coming in. Ticket 6060
			var depth int32
			if opType == urxLaStart {
				depth = 2
			} else {
				depth = 1
			}

			for {
				loc++
				op = c.out.compiledPat[loc]
				if op.typ() == urxLaStart {
					// The boilerplate for look-ahead includes two LA_END instructions,
					// Depth will be decremented by each one when it is seen.
					depth += 2
				}
				if op.typ() == urxLbStart {
					depth++
				}
				if op.typ() == urxLaEnd {
					depth--
					if depth == 0 {
						break
					}
				}
				if op.typ() == urxLbnEnd {
					depth--
					if depth == 0 {
						break
					}
				}
				if op.typ() == urxStateSave {
					// Need this because neg lookahead blocks will FAIL to outside of the block.
					jmpDest := op.value()
					if jmpDest > loc {
						if currentLen < forwardedLength[jmpDest] {
							forwardedLength[jmpDest] = currentLen
						}
					}
				}
			}

		case urxLaEnd, urxLbCont, urxLbEnd, urxLbnCount, urxLbnEnd:
			// Only come here if the matching URX_LA_START or URX_LB_START was not in the
			// range being sized, which happens when measuring size of look-behind blocks.

		default:
			panic("unreachable")
		}
	}

	// We have finished walking through the ops. Check whether some forward jump
	// propagated a shorter length to location end+1.
	if forwardedLength[end+1] < currentLen {
		currentLen = forwardedLength[end+1]
	}

	return currentLen
}
+
// maxMatchLength computes an upper bound on the length (in 16-bit code
// units) of input the compiled pattern between locations start and end
// (inclusive) can match.  math.MaxInt32 means "unbounded"; overstating
// the bound is safe, understating it is not.  Used to validate that
// look-behind blocks have a bounded match length.
func (c *compiler) maxMatchLength(start, end int) int32 {
	if c.err != nil {
		return 0
	}
	var loc int
	var currentLen int32

	// Zero-initialized: max lengths propagated forward start at 0.
	// NOTE(review): this slice is sized end+1 while minMatchLength uses
	// end+2; the urxBacktrack case below reads forwardedLength[loc+1],
	// which would be out of range if a BACKTRACK op sat exactly at `end`
	// -- confirm that op placement makes this unreachable.
	forwardedLength := make([]int32, end+1)

	for loc = start; loc <= end; loc++ {
		op := c.out.compiledPat[loc]
		opType := op.typ()

		// The loop is advancing linearly through the pattern.
		// If the op we are now at was the destination of a branch in the pattern,
		// and that path has a longer maximum length than the current accumulated value,
		// replace the current accumulated value.
		if forwardedLength[loc] > currentLen {
			currentLen = forwardedLength[loc]
		}

		switch opType {
		// Ops that don't change the total length matched
		case urxReservedOp,
			urxEnd,
			urxStringLen,
			urxNop,
			urxStartCapture,
			urxEndCapture,
			urxBackslashB,
			urxBackslashBu,
			urxBackslashG,
			urxBackslashZ,
			urxCaret,
			urxDollar,
			urxDollarM,
			urxDollarD,
			urxDollarMd,
			urxRelocOprnd,
			urxStoInpLoc,
			urxCaretM,
			urxCaretMUnix,
			urxStoSp, // Setup for atomic or possessive blocks. Doesn't change what can match.
			urxLdSp,
			urxLbEnd,
			urxLbCont,
			urxLbnCount,
			urxLbnEnd:
			// no-op

		// Ops that increase that cause an unbounded increase in the length
		// of a matched string, or that increase it a hard to characterize way.
		// Call the max length unbounded, and stop further checking.
		case urxBackref, // BackRef. Must assume that it might be a zero length match
			urxBackrefI,
			urxBackslashX: // Grapheme Cluster. Minimum is 1, max unbounded.
			currentLen = math.MaxInt32

		// Ops that match a max of one character (possibly two 16 bit code units.)
		//
		case urxStaticSetref,
			urxStatSetrefN,
			urxSetref,
			urxBackslashD,
			urxBackslashH,
			urxBackslashR,
			urxBackslashV,
			urcOnecharI,
			urxDotanyAll,
			urxDotany,
			urxDotanyUnix:
			currentLen = safeIncrement(currentLen, 2)

		// Single literal character. Increase current max length by one or two,
		// depending on whether the char is in the supplementary range.
		case urxOnechar:
			currentLen = safeIncrement(currentLen, 1)
			if op.value() > 0x10000 {
				currentLen = safeIncrement(currentLen, 1)
			}

		// Jumps.
		//
		case urxJmp, urxJmpx, urxJmpSav, urxJmpSavX:
			jmpDest := op.value()
			if jmpDest < loc {
				// Loop of some kind. Max match length is unbounded.
				currentLen = math.MaxInt32
			} else {
				// Forward jump. Propagate the current min length to the target loc of the jump.
				if forwardedLength[jmpDest] < currentLen {
					forwardedLength[jmpDest] = currentLen
				}
				currentLen = 0
			}

		case urxBacktrack:
			// back-tracks are kind of like a branch, except that the max length was
			// propagated already, by the state save.
			currentLen = forwardedLength[loc+1]

		case urxStateSave:
			// State Save, for forward jumps, propagate the current minimum.
			// of the state save.
			// For backwards jumps, they create a loop, maximum
			// match length is unbounded.
			jmpDest := op.value()
			if jmpDest > loc {
				if currentLen > forwardedLength[jmpDest] {
					forwardedLength[jmpDest] = currentLen
				}
			} else {
				currentLen = math.MaxInt32
			}

		case urxString:
			loc++
			stringLenOp := c.out.compiledPat[loc]
			currentLen = safeIncrement(currentLen, stringLenOp.value())

		case urxStringI:
			// TODO: This code assumes that any user string that matches will be no longer
			// than our compiled string, with case insensitive matching.
			// Our compiled string has been case-folded already.
			//
			// Any matching user string will have no more code points than our
			// compiled (folded) string. Folding may add code points, but
			// not remove them.
			//
			// There is a potential problem if a supplemental code point
			// case-folds to a BMP code point. In this case our compiled string
			// could be shorter (in code units) than a matching user string.
			//
			// At this time (Unicode 6.1) there are no such characters, and this case
			// is not being handled. A test, intltest regex/Bug9283, will fail if
			// any problematic characters are added to Unicode.
			//
			// If this happens, we can make a set of the BMP chars that the
			// troublesome supplementals fold to, scan our string, and bump the
			// currentLen one extra for each that is found.
			//
			loc++
			stringLenOp := c.out.compiledPat[loc]
			currentLen = safeIncrement(currentLen, stringLenOp.value())

		case urxCtrInit, urxCtrInitNg:
			// For Loops, recursively call this function on the pattern for the loop body,
			// then multiply the result by the maximum loop count.
			loopEndLoc := c.out.compiledPat[loc+1].value()
			if loopEndLoc == loc+4 {
				// Loop has an empty body. No affect on max match length.
				// Continue processing with code after the loop end.
				loc = loopEndLoc
				break
			}

			maxLoopCount := int(c.out.compiledPat[loc+3])
			if maxLoopCount == -1 {
				// Unbounded Loop. No upper bound on match length.
				currentLen = math.MaxInt32
				break
			}

			blockLen := c.maxMatchLength(loc+4, loopEndLoc-1) // Recursive call.
			updatedLen := int(currentLen) + int(blockLen)*maxLoopCount
			if updatedLen >= math.MaxInt32 {
				currentLen = math.MaxInt32
				break
			}
			currentLen = int32(updatedLen)
			loc = loopEndLoc

		case utxCtrLoop, urxCtrLoopNg:
			// Loop bodies are skipped above, so CTR_LOOP is never reached here.
			panic("should not encounter this opcode")

		case urxLoopSrI, urxLoopDotI, urxLoopC:
			// For anything to do with loops, make the match length unbounded.
			currentLen = math.MaxInt32

		case urxLaStart, urxLaEnd:
			// Look-ahead. Just ignore, treat the look-ahead block as if
			// it were normal pattern. Gives a too-long match length,
			// but good enough for now.

		case urxLbStart:
			// Look-behind. Scan forward until the matching look-around end,
			// without processing the look-behind block.
			dataLoc := op.value()
			for loc = loc + 1; loc <= end; loc++ {
				op = c.out.compiledPat[loc]
				if (op.typ() == urxLaEnd || op.typ() == urxLbnEnd) && (op.value() == dataLoc) {
					break
				}
			}

		default:
			panic("unreachable")
		}

		if currentLen == math.MaxInt32 {
			// The maximum length is unbounded.
			// Stop further processing of the pattern.
			break
		}
	}

	return currentLen
}
+
// Machine Generated below.
// It may need updating with new versions of Unicode.
// Intltest test RegexTest::TestCaseInsensitiveStarters will fail if an update is needed.
// The update tool is here:
// svn+ssh://source.icu-project.org/repos/icu/tools/trunk/unicode/c/genregexcasing

// Machine Generated Data. Do not hand edit.

// reCaseFixCodePoints: sorted list of code points with "interesting"
// case-folding behavior; 0x110000 is an out-of-range sentinel that
// terminates linear searches.  Indexed in parallel with
// reCaseFixStringOffsets and reCaseFixCounts.
var reCaseFixCodePoints = [...]rune{
	0x61, 0x66, 0x68, 0x69, 0x6a, 0x73, 0x74, 0x77, 0x79, 0x2bc,
	0x3ac, 0x3ae, 0x3b1, 0x3b7, 0x3b9, 0x3c1, 0x3c5, 0x3c9, 0x3ce, 0x565,
	0x574, 0x57e, 0x1f00, 0x1f01, 0x1f02, 0x1f03, 0x1f04, 0x1f05, 0x1f06, 0x1f07,
	0x1f20, 0x1f21, 0x1f22, 0x1f23, 0x1f24, 0x1f25, 0x1f26, 0x1f27, 0x1f60, 0x1f61,
	0x1f62, 0x1f63, 0x1f64, 0x1f65, 0x1f66, 0x1f67, 0x1f70, 0x1f74, 0x1f7c, 0x110000}

// reCaseFixStringOffsets[i]: offset into reCaseFixData of the extra
// starter characters for reCaseFixCodePoints[i].
var reCaseFixStringOffsets = [...]int16{
	0x0, 0x1, 0x6, 0x7, 0x8, 0x9, 0xd, 0xe, 0xf, 0x10, 0x11, 0x12, 0x13,
	0x17, 0x1b, 0x20, 0x21, 0x2a, 0x2e, 0x2f, 0x30, 0x34, 0x35, 0x37, 0x39, 0x3b,
	0x3d, 0x3f, 0x41, 0x43, 0x45, 0x47, 0x49, 0x4b, 0x4d, 0x4f, 0x51, 0x53, 0x55,
	0x57, 0x59, 0x5b, 0x5d, 0x5f, 0x61, 0x63, 0x65, 0x66, 0x67, 0}

// reCaseFixCounts[i]: how many UTF-16 code units of reCaseFixData belong
// to reCaseFixCodePoints[i], starting at its offset.
var reCaseFixCounts = [...]int16{
	0x1, 0x5, 0x1, 0x1, 0x1, 0x4, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x4, 0x4, 0x5, 0x1, 0x9,
	0x4, 0x1, 0x1, 0x4, 0x1, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2,
	0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x2, 0x1, 0x1, 0x1, 0}

// reCaseFixData: UTF-16 encoded extra starter characters, consumed via
// the offset/count tables above.
var reCaseFixData = [...]uint16{
	0x1e9a, 0xfb00, 0xfb01, 0xfb02, 0xfb03, 0xfb04, 0x1e96, 0x130, 0x1f0, 0xdf, 0x1e9e, 0xfb05,
	0xfb06, 0x1e97, 0x1e98, 0x1e99, 0x149, 0x1fb4, 0x1fc4, 0x1fb3, 0x1fb6, 0x1fb7, 0x1fbc, 0x1fc3,
	0x1fc6, 0x1fc7, 0x1fcc, 0x390, 0x1fd2, 0x1fd3, 0x1fd6, 0x1fd7, 0x1fe4, 0x3b0, 0x1f50, 0x1f52,
	0x1f54, 0x1f56, 0x1fe2, 0x1fe3, 0x1fe6, 0x1fe7, 0x1ff3, 0x1ff6, 0x1ff7, 0x1ffc, 0x1ff4, 0x587,
	0xfb13, 0xfb14, 0xfb15, 0xfb17, 0xfb16, 0x1f80, 0x1f88, 0x1f81, 0x1f89, 0x1f82, 0x1f8a, 0x1f83,
	0x1f8b, 0x1f84, 0x1f8c, 0x1f85, 0x1f8d, 0x1f86, 0x1f8e, 0x1f87, 0x1f8f, 0x1f90, 0x1f98, 0x1f91,
	0x1f99, 0x1f92, 0x1f9a, 0x1f93, 0x1f9b, 0x1f94, 0x1f9c, 0x1f95, 0x1f9d, 0x1f96, 0x1f9e, 0x1f97,
	0x1f9f, 0x1fa0, 0x1fa8, 0x1fa1, 0x1fa9, 0x1fa2, 0x1faa, 0x1fa3, 0x1fab, 0x1fa4, 0x1fac, 0x1fa5,
	0x1fad, 0x1fa6, 0x1fae, 0x1fa7, 0x1faf, 0x1fb2, 0x1fc2, 0x1ff2, 0}
+
// findCaseInsensitiveStarters fills starterChars with the set of code
// points that could begin a case-insensitive match of ch.  Besides the
// simple fold and case closure, characters in the machine-generated
// reCaseFix tables contribute extra starters (characters whose full case
// folding begins with ch's fold).  starterChars is cleared first.
func (c *compiler) findCaseInsensitiveStarters(ch rune, starterChars *uset.UnicodeSet) {
	if uprops.HasBinaryProperty(ch, uprops.UCharCaseSensitive) {
		caseFoldedC := ucase.Fold(ch)
		starterChars.Clear()
		starterChars.AddRune(caseFoldedC)

		var i int
		for i = 0; reCaseFixCodePoints[i] < ch; i++ {
			// Simple linear search through the sorted list of interesting code points.
			// Terminates because the table ends with the sentinel 0x110000.
		}

		if reCaseFixCodePoints[i] == ch {
			// ch has extra starter characters; decode them from the
			// UTF-16 data table and add each one.
			data := reCaseFixData[reCaseFixStringOffsets[i]:]
			numCharsToAdd := reCaseFixCounts[i]
			for j := int16(0); j < numCharsToAdd; j++ {
				var cpToAdd rune
				cpToAdd, data = utf16.NextUnsafe(data)
				starterChars.AddRune(cpToAdd)
			}
		}

		starterChars.CloseOver(uset.CaseInsensitive)
	} else {
		// Not a cased character. Just return it alone.
		starterChars.Clear()
		starterChars.AddRune(ch)
	}
}
+
+func (c *compiler) scanProp() *uset.UnicodeSet {
+ if c.err != nil {
+ return nil
+ }
+ negated := c.c.char == chP
+
+ c.nextChar(&c.c)
+ if c.c.char != chLBrace {
+ c.error(PropertySyntax)
+ return nil
+ }
+
+ var propertyName strings.Builder
+ for {
+ c.nextChar(&c.c)
+ if c.c.char == chRBrace {
+ break
+ }
+ if c.c.char == -1 {
+ c.error(PropertySyntax)
+ return nil
+ }
+ propertyName.WriteRune(c.c.char)
+ }
+
+ ss := c.createSetForProperty(propertyName.String(), negated)
+ c.nextChar(&c.c)
+ return ss
+}
+
+func (c *compiler) createSetForProperty(propName string, negated bool) *uset.UnicodeSet {
+ if c.err != nil {
+ return nil
+ }
+
+ var set *uset.UnicodeSet
+
+ var usetFlags uset.USet
+ if c.modeFlags&CaseInsensitive != 0 {
+ usetFlags |= uset.CaseInsensitive
+ }
+
+ var err error
+ set, err = uprops.NewUnicodeSetFomPattern("\\p{"+propName+"}", usetFlags)
+ if err == nil {
+ goto done
+ }
+
+ //
+ // The incoming property wasn't directly recognized by ICU.
+
+ // Check [:word:] and [:all:]. These are not recognized as a properties by ICU UnicodeSet.
+ // Java accepts 'word' with mixed case.
+ // Java accepts 'all' only in all lower case.
+ if strings.EqualFold(propName, "word") {
+ set = staticPropertySets[urxIswordSet].Clone()
+ goto done
+ }
+ if propName == "all" {
+ set = uset.New()
+ set.AddRuneRange(0, 0x10ffff)
+ goto done
+ }
+
+ // Do Java InBlock expressions
+ //
+ if strings.HasPrefix(propName, "In") && len(propName) >= 3 {
+ set = uset.New()
+ if uprops.ApplyPropertyAlias(set, "Block", propName[2:]) != nil {
+ c.error(PropertySyntax)
+ }
+ goto done
+ }
+
+ // Check for the Java form "IsBooleanPropertyValue", which we will recast
+ // as "BooleanPropertyValue". The property value can be either
+ // a General Category or a Script Name.
+ if strings.HasPrefix(propName, "Is") && len(propName) >= 3 {
+ mPropName := propName[2:]
+ if strings.IndexByte(mPropName, '=') >= 0 {
+ c.error(PropertySyntax)
+ goto done
+ }
+
+ if strings.EqualFold(mPropName, "assigned") {
+ mPropName = "unassigned"
+ negated = !negated
+ } else if strings.EqualFold(mPropName, "TitleCase") {
+ mPropName = "Titlecase_Letter"
+ }
+
+ set, err = uprops.NewUnicodeSetFomPattern("\\p{"+mPropName+"}", 0)
+ if err != nil {
+ c.error(PropertySyntax)
+ } else if !set.IsEmpty() && (usetFlags&uset.CaseInsensitive) != 0 {
+ set.CloseOver(uset.CaseInsensitive)
+ }
+ goto done
+ }
+
+ if strings.HasPrefix(propName, "java") {
+ set = uset.New()
+
+ //
+ // Try the various Java specific properties.
+ // These all begin with "java"
+ //
+ if propName == "javaDefined" {
+ c.err = uprops.AddCategory(set, uchar.GcCnMask)
+ set.Complement()
+ } else if propName == "javaDigit" {
+ c.err = uprops.AddCategory(set, uchar.GcNdMask)
+ } else if propName == "javaIdentifierIgnorable" {
+ c.err = addIdentifierIgnorable(set)
+ } else if propName == "javaISOControl" {
+ set.AddRuneRange(0, 0x1F)
+ set.AddRuneRange(0x7F, 0x9F)
+ } else if propName == "javaJavaIdentifierPart" {
+ c.err = uprops.AddCategory(set, uchar.GcLMask)
+ if c.err == nil {
+ c.err = uprops.AddCategory(set, uchar.GcScMask)
+ }
+ if c.err == nil {
+ c.err = uprops.AddCategory(set, uchar.GcPcMask)
+ }
+ if c.err == nil {
+ c.err = uprops.AddCategory(set, uchar.GcNdMask)
+ }
+ if c.err == nil {
+ c.err = uprops.AddCategory(set, uchar.GcNlMask)
+ }
+ if c.err == nil {
+ c.err = uprops.AddCategory(set, uchar.GcMcMask)
+ }
+ if c.err == nil {
+ c.err = uprops.AddCategory(set, uchar.GcMnMask)
+ }
+ if c.err == nil {
+ c.err = addIdentifierIgnorable(set)
+ }
+ } else if propName == "javaJavaIdentifierStart" {
+ c.err = uprops.AddCategory(set, uchar.GcLMask)
+ if c.err == nil {
+ c.err = uprops.AddCategory(set, uchar.GcNlMask)
+ }
+ if c.err == nil {
+ c.err = uprops.AddCategory(set, uchar.GcScMask)
+ }
+ if c.err == nil {
+ c.err = uprops.AddCategory(set, uchar.GcPcMask)
+ }
+ } else if propName == "javaLetter" {
+ c.err = uprops.AddCategory(set, uchar.GcLMask)
+ } else if propName == "javaLetterOrDigit" {
+ c.err = uprops.AddCategory(set, uchar.GcLMask)
+ if c.err == nil {
+ c.err = uprops.AddCategory(set, uchar.GcNdMask)
+ }
+ } else if propName == "javaLowerCase" {
+ c.err = uprops.AddCategory(set, uchar.GcLlMask)
+ } else if propName == "javaMirrored" {
+ c.err = uprops.ApplyIntPropertyValue(set, uprops.UCharBidiMirrored, 1)
+ } else if propName == "javaSpaceChar" {
+ c.err = uprops.AddCategory(set, uchar.GcZMask)
+ } else if propName == "javaSupplementaryCodePoint" {
+ set.AddRuneRange(0x10000, uset.MaxValue)
+ } else if propName == "javaTitleCase" {
+ c.err = uprops.AddCategory(set, uchar.GcLtMask)
+ } else if propName == "javaUnicodeIdentifierStart" {
+ c.err = uprops.AddCategory(set, uchar.GcLMask)
+ if c.err == nil {
+ c.err = uprops.AddCategory(set, uchar.GcNlMask)
+ }
+ } else if propName == "javaUnicodeIdentifierPart" {
+ c.err = uprops.AddCategory(set, uchar.GcLMask)
+ if c.err == nil {
+ c.err = uprops.AddCategory(set, uchar.GcPcMask)
+ }
+ if c.err == nil {
+ c.err = uprops.AddCategory(set, uchar.GcNdMask)
+ }
+ if c.err == nil {
+ c.err = uprops.AddCategory(set, uchar.GcNlMask)
+ }
+ if c.err == nil {
+ c.err = uprops.AddCategory(set, uchar.GcMcMask)
+ }
+ if c.err == nil {
+ c.err = uprops.AddCategory(set, uchar.GcMnMask)
+ }
+ if c.err == nil {
+ c.err = addIdentifierIgnorable(set)
+ }
+ } else if propName == "javaUpperCase" {
+ c.err = uprops.AddCategory(set, uchar.GcLuMask)
+ } else if propName == "javaValidCodePoint" {
+ set.AddRuneRange(0, uset.MaxValue)
+ } else if propName == "javaWhitespace" {
+ c.err = uprops.AddCategory(set, uchar.GcZMask)
+ excl := uset.New()
+ excl.AddRune(0x0a)
+ excl.AddRune(0x2007)
+ excl.AddRune(0x202f)
+ set.RemoveAll(excl)
+ set.AddRuneRange(9, 0x0d)
+ set.AddRuneRange(0x1c, 0x1f)
+ } else {
+ c.error(PropertySyntax)
+ }
+
+ if c.err == nil && !set.IsEmpty() && (usetFlags&uset.CaseInsensitive) != 0 {
+ set.CloseOver(uset.CaseInsensitive)
+ }
+ goto done
+ }
+
+ // Unrecognized property. ICU didn't like it as it was, and none of the Java compatibility
+ // extensions matched it.
+ c.error(PropertySyntax)
+
+done:
+ if c.err != nil {
+ return nil
+ }
+ if negated {
+ set.Complement()
+ }
+ return set
+}
+
+func addIdentifierIgnorable(set *uset.UnicodeSet) error {
+ set.AddRuneRange(0, 8)
+ set.AddRuneRange(0x0e, 0x1b)
+ set.AddRuneRange(0x7f, 0x9f)
+
+ return uprops.AddCategory(set, uchar.GcCfMask)
+}
+
+func (c *compiler) scanPosixProp() *uset.UnicodeSet {
+ var set *uset.UnicodeSet
+
+ if !(c.c.char == chColon) {
+ panic("assertion failed: c.lastChar == ':'")
+ }
+
+ savedScanIndex := c.scanIndex
+ savedScanPattern := c.p
+ savedQuoteMode := c.quoteMode
+ savedInBackslashQuote := c.inBackslashQuote
+ savedEOLComments := c.eolComments
+ savedLineNum := c.lineNum
+ savedCharNum := c.charNum
+ savedLastChar := c.lastChar
+ savedPeekChar := c.peekChar
+ savedC := c.c
+
+ // Scan for a closing ]. A little tricky because there are some perverse
+ // edge cases possible. "[:abc\Qdef:] \E]" is a valid non-property expression,
+ // ending on the second closing ].
+ var propName []rune
+ negated := false
+
+ // Check for and consume the '^' in a negated POSIX property, e.g. [:^Letter:]
+ c.nextChar(&c.c)
+ if c.c.char == chUp {
+ negated = true
+ c.nextChar(&c.c)
+ }
+
+ // Scan for the closing ":]", collecting the property name along the way.
+ sawPropSetTerminator := false
+ for {
+ propName = append(propName, c.c.char)
+ c.nextChar(&c.c)
+ if c.c.quoted || c.c.char == -1 {
+ // Escaped characters or end of input - either says this isn't a [:Property:]
+ break
+ }
+ if c.c.char == chColon {
+ c.nextChar(&c.c)
+ if c.c.char == chRBracket {
+ sawPropSetTerminator = true
+ break
+ }
+ }
+ }
+
+ if sawPropSetTerminator {
+ set = c.createSetForProperty(string(propName), negated)
+ } else {
+ // No closing ']' - not a [:Property:]
+ // Restore the original scan position.
+ // The main scanner will retry the input as a normal set expression,
+ // not a [:Property:] expression.
+ c.scanIndex = savedScanIndex
+ c.p = savedScanPattern
+ c.quoteMode = savedQuoteMode
+ c.inBackslashQuote = savedInBackslashQuote
+ c.eolComments = savedEOLComments
+ c.lineNum = savedLineNum
+ c.charNum = savedCharNum
+ c.lastChar = savedLastChar
+ c.peekChar = savedPeekChar
+ c.c = savedC
+ }
+
+ return set
+}
+
+func (c *compiler) compileSet(set *uset.UnicodeSet) {
+ if set == nil {
+ return
+ }
+ // Remove any strings from the set.
+ // There shouldn't be any, but just in case.
+ // (Case Closure can add them; if we had a simple case closure available that
+ // ignored strings, that would be better.)
+ setSize := set.Len()
+
+ switch setSize {
+ case 0:
+ // Set of no elements. Always fails to match.
+ c.appendOp(urxBacktrack, 0)
+
+ case 1:
+ // The set contains only a single code point. Put it into
+ // the compiled pattern as a single char operation rather
+ // than a set, and discard the set itself.
+ c.literalChar(set.RuneAt(0))
+
+ default:
+ // The set contains two or more chars. (the normal case)
+ // Put it into the compiled pattern as a set.
+ // theSet->freeze();
+ setNumber := len(c.out.sets)
+ c.out.sets = append(c.out.sets, set)
+ c.appendOp(urxSetref, setNumber)
+ }
+}
diff --git a/go/mysql/icuregex/compiler_table.go b/go/mysql/icuregex/compiler_table.go
new file mode 100644
index 00000000000..e8cfe0d5e55
--- /dev/null
+++ b/go/mysql/icuregex/compiler_table.go
@@ -0,0 +1,357 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package icuregex
+
+type patternParseAction uint8
+
+const (
+ doSetBackslashD patternParseAction = iota
+ doBackslashh
+ doBackslashH
+ doSetLiteralEscaped
+ doOpenLookAheadNeg
+ doCompleteNamedBackRef
+ doPatStart
+ doBackslashS
+ doBackslashD
+ doNGStar
+ doNOP
+ doBackslashX
+ doSetLiteral
+ doContinueNamedCapture
+ doBackslashG
+ doBackslashR
+ doSetBegin
+ doSetBackslashv
+ doPossessivePlus
+ doPerlInline
+ doBackslashZ
+ doSetAddAmp
+ doSetBeginDifference1
+ doIntervalError
+ doSetNegate
+ doIntervalInit
+ doSetIntersection2
+ doPossessiveInterval
+ doRuleError
+ doBackslashW
+ doContinueNamedBackRef
+ doOpenNonCaptureParen
+ doExit
+ doSetNamedChar
+ doSetBackslashV
+ doConditionalExpr
+ doEscapeError
+ doBadOpenParenType
+ doPossessiveStar
+ doSetAddDash
+ doEscapedLiteralChar
+ doSetBackslashw
+ doIntervalUpperDigit
+ doBackslashv
+ doSetBackslashS
+ doSetNoCloseError
+ doSetProp
+ doBackslashB
+ doSetEnd
+ doSetRange
+ doMatchModeParen
+ doPlus
+ doBackslashV
+ doSetMatchMode
+ doBackslashz
+ doSetNamedRange
+ doOpenLookBehindNeg
+ doInterval
+ doBadNamedCapture
+ doBeginMatchMode
+ doBackslashd
+ doPatFinish
+ doNamedChar
+ doNGPlus
+ doSetDifference2
+ doSetBackslashH
+ doCloseParen
+ doDotAny
+ doOpenCaptureParen
+ doEnterQuoteMode
+ doOpenAtomicParen
+ doBadModeFlag
+ doSetBackslashd
+ doSetFinish
+ doProperty
+ doBeginNamedBackRef
+ doBackRef
+ doOpt
+ doDollar
+ doBeginNamedCapture
+ doNGInterval
+ doSetOpError
+ doSetPosixProp
+ doSetBeginIntersection1
+ doBackslashb
+ doSetBeginUnion
+ doIntevalLowerDigit
+ doSetBackslashh
+ doStar
+ doMatchMode
+ doBackslashA
+ doOpenLookBehind
+ doPossessiveOpt
+ doOrOperator
+ doBackslashw
+ doBackslashs
+ doLiteralChar
+ doSuppressComments
+ doCaret
+ doIntervalSame
+ doNGOpt
+ doOpenLookAhead
+ doSetBackslashW
+ doMismatchedParenErr
+ doSetBackslashs
+ rbbiLastAction
+)
+
+// -------------------------------------------------------------------------------
+//
+// RegexTableEl represents the structure of a row in the transition table
+// for the pattern parser state machine.
+//
+// -------------------------------------------------------------------------------
+type regexTableEl struct {
+ action patternParseAction
+ charClass uint8
+ nextState uint8
+ pushState uint8
+ nextChar bool
+}
+
+var parseStateTable = []regexTableEl{
+ {doNOP, 0, 0, 0, true},
+ {doPatStart, 255, 2, 0, false}, // 1 start
+ {doLiteralChar, 254, 14, 0, true}, // 2 term
+ {doLiteralChar, 130, 14, 0, true}, // 3
+ {doSetBegin, 91 /* [ */, 123, 205, true}, // 4
+ {doNOP, 40 /* ( */, 27, 0, true}, // 5
+ {doDotAny, 46 /* . */, 14, 0, true}, // 6
+ {doCaret, 94 /* ^ */, 14, 0, true}, // 7
+ {doDollar, 36 /* $ */, 14, 0, true}, // 8
+ {doNOP, 92 /* \ */, 89, 0, true}, // 9
+ {doOrOperator, 124 /* | */, 2, 0, true}, // 10
+ {doCloseParen, 41 /* ) */, 255, 0, true}, // 11
+ {doPatFinish, 253, 2, 0, false}, // 12
+ {doRuleError, 255, 206, 0, false}, // 13
+ {doNOP, 42 /* * */, 68, 0, true}, // 14 expr-quant
+ {doNOP, 43 /* + */, 71, 0, true}, // 15
+ {doNOP, 63 /* ? */, 74, 0, true}, // 16
+ {doIntervalInit, 123 /* { */, 77, 0, true}, // 17
+ {doNOP, 40 /* ( */, 23, 0, true}, // 18
+ {doNOP, 255, 20, 0, false}, // 19
+ {doOrOperator, 124 /* | */, 2, 0, true}, // 20 expr-cont
+ {doCloseParen, 41 /* ) */, 255, 0, true}, // 21
+ {doNOP, 255, 2, 0, false}, // 22
+ {doSuppressComments, 63 /* ? */, 25, 0, true}, // 23 open-paren-quant
+ {doNOP, 255, 27, 0, false}, // 24
+ {doNOP, 35 /* # */, 50, 14, true}, // 25 open-paren-quant2
+ {doNOP, 255, 29, 0, false}, // 26
+ {doSuppressComments, 63 /* ? */, 29, 0, true}, // 27 open-paren
+ {doOpenCaptureParen, 255, 2, 14, false}, // 28
+ {doOpenNonCaptureParen, 58 /* : */, 2, 14, true}, // 29 open-paren-extended
+ {doOpenAtomicParen, 62 /* > */, 2, 14, true}, // 30
+ {doOpenLookAhead, 61 /* = */, 2, 20, true}, // 31
+ {doOpenLookAheadNeg, 33 /* ! */, 2, 20, true}, // 32
+ {doNOP, 60 /* < */, 46, 0, true}, // 33
+ {doNOP, 35 /* # */, 50, 2, true}, // 34
+ {doBeginMatchMode, 105 /* i */, 53, 0, false}, // 35
+ {doBeginMatchMode, 100 /* d */, 53, 0, false}, // 36
+ {doBeginMatchMode, 109 /* m */, 53, 0, false}, // 37
+ {doBeginMatchMode, 115 /* s */, 53, 0, false}, // 38
+ {doBeginMatchMode, 117 /* u */, 53, 0, false}, // 39
+ {doBeginMatchMode, 119 /* w */, 53, 0, false}, // 40
+ {doBeginMatchMode, 120 /* x */, 53, 0, false}, // 41
+ {doBeginMatchMode, 45 /* - */, 53, 0, false}, // 42
+ {doConditionalExpr, 40 /* ( */, 206, 0, true}, // 43
+ {doPerlInline, 123 /* { */, 206, 0, true}, // 44
+ {doBadOpenParenType, 255, 206, 0, false}, // 45
+ {doOpenLookBehind, 61 /* = */, 2, 20, true}, // 46 open-paren-lookbehind
+ {doOpenLookBehindNeg, 33 /* ! */, 2, 20, true}, // 47
+ {doBeginNamedCapture, 129, 64, 0, false}, // 48
+ {doBadOpenParenType, 255, 206, 0, false}, // 49
+ {doNOP, 41 /* ) */, 255, 0, true}, // 50 paren-comment
+ {doMismatchedParenErr, 253, 206, 0, false}, // 51
+ {doNOP, 255, 50, 0, true}, // 52
+ {doMatchMode, 105 /* i */, 53, 0, true}, // 53 paren-flag
+ {doMatchMode, 100 /* d */, 53, 0, true}, // 54
+ {doMatchMode, 109 /* m */, 53, 0, true}, // 55
+ {doMatchMode, 115 /* s */, 53, 0, true}, // 56
+ {doMatchMode, 117 /* u */, 53, 0, true}, // 57
+ {doMatchMode, 119 /* w */, 53, 0, true}, // 58
+ {doMatchMode, 120 /* x */, 53, 0, true}, // 59
+ {doMatchMode, 45 /* - */, 53, 0, true}, // 60
+ {doSetMatchMode, 41 /* ) */, 2, 0, true}, // 61
+ {doMatchModeParen, 58 /* : */, 2, 14, true}, // 62
+ {doBadModeFlag, 255, 206, 0, false}, // 63
+ {doContinueNamedCapture, 129, 64, 0, true}, // 64 named-capture
+ {doContinueNamedCapture, 128, 64, 0, true}, // 65
+ {doOpenCaptureParen, 62 /* > */, 2, 14, true}, // 66
+ {doBadNamedCapture, 255, 206, 0, false}, // 67
+ {doNGStar, 63 /* ? */, 20, 0, true}, // 68 quant-star
+ {doPossessiveStar, 43 /* + */, 20, 0, true}, // 69
+ {doStar, 255, 20, 0, false}, // 70
+ {doNGPlus, 63 /* ? */, 20, 0, true}, // 71 quant-plus
+ {doPossessivePlus, 43 /* + */, 20, 0, true}, // 72
+ {doPlus, 255, 20, 0, false}, // 73
+ {doNGOpt, 63 /* ? */, 20, 0, true}, // 74 quant-opt
+ {doPossessiveOpt, 43 /* + */, 20, 0, true}, // 75
+ {doOpt, 255, 20, 0, false}, // 76
+ {doNOP, 128, 79, 0, false}, // 77 interval-open
+ {doIntervalError, 255, 206, 0, false}, // 78
+ {doIntevalLowerDigit, 128, 79, 0, true}, // 79 interval-lower
+ {doNOP, 44 /* , */, 83, 0, true}, // 80
+ {doIntervalSame, 125 /* } */, 86, 0, true}, // 81
+ {doIntervalError, 255, 206, 0, false}, // 82
+ {doIntervalUpperDigit, 128, 83, 0, true}, // 83 interval-upper
+ {doNOP, 125 /* } */, 86, 0, true}, // 84
+ {doIntervalError, 255, 206, 0, false}, // 85
+ {doNGInterval, 63 /* ? */, 20, 0, true}, // 86 interval-type
+ {doPossessiveInterval, 43 /* + */, 20, 0, true}, // 87
+ {doInterval, 255, 20, 0, false}, // 88
+ {doBackslashA, 65 /* A */, 2, 0, true}, // 89 backslash
+ {doBackslashB, 66 /* B */, 2, 0, true}, // 90
+ {doBackslashb, 98 /* b */, 2, 0, true}, // 91
+ {doBackslashd, 100 /* d */, 14, 0, true}, // 92
+ {doBackslashD, 68 /* D */, 14, 0, true}, // 93
+ {doBackslashG, 71 /* G */, 2, 0, true}, // 94
+ {doBackslashh, 104 /* h */, 14, 0, true}, // 95
+ {doBackslashH, 72 /* H */, 14, 0, true}, // 96
+ {doNOP, 107 /* k */, 115, 0, true}, // 97
+ {doNamedChar, 78 /* N */, 14, 0, false}, // 98
+ {doProperty, 112 /* p */, 14, 0, false}, // 99
+ {doProperty, 80 /* P */, 14, 0, false}, // 100
+ {doBackslashR, 82 /* R */, 14, 0, true}, // 101
+ {doEnterQuoteMode, 81 /* Q */, 2, 0, true}, // 102
+ {doBackslashS, 83 /* S */, 14, 0, true}, // 103
+ {doBackslashs, 115 /* s */, 14, 0, true}, // 104
+ {doBackslashv, 118 /* v */, 14, 0, true}, // 105
+ {doBackslashV, 86 /* V */, 14, 0, true}, // 106
+ {doBackslashW, 87 /* W */, 14, 0, true}, // 107
+ {doBackslashw, 119 /* w */, 14, 0, true}, // 108
+ {doBackslashX, 88 /* X */, 14, 0, true}, // 109
+ {doBackslashZ, 90 /* Z */, 2, 0, true}, // 110
+ {doBackslashz, 122 /* z */, 2, 0, true}, // 111
+ {doBackRef, 128, 14, 0, true}, // 112
+ {doEscapeError, 253, 206, 0, false}, // 113
+ {doEscapedLiteralChar, 255, 14, 0, true}, // 114
+ {doBeginNamedBackRef, 60 /* < */, 117, 0, true}, // 115 named-backref
+ {doBadNamedCapture, 255, 206, 0, false}, // 116
+ {doContinueNamedBackRef, 129, 119, 0, true}, // 117 named-backref-2
+ {doBadNamedCapture, 255, 206, 0, false}, // 118
+ {doContinueNamedBackRef, 129, 119, 0, true}, // 119 named-backref-3
+ {doContinueNamedBackRef, 128, 119, 0, true}, // 120
+ {doCompleteNamedBackRef, 62 /* > */, 14, 0, true}, // 121
+ {doBadNamedCapture, 255, 206, 0, false}, // 122
+ {doSetNegate, 94 /* ^ */, 126, 0, true}, // 123 set-open
+ {doSetPosixProp, 58 /* : */, 128, 0, false}, // 124
+ {doNOP, 255, 126, 0, false}, // 125
+ {doSetLiteral, 93 /* ] */, 141, 0, true}, // 126 set-open2
+ {doNOP, 255, 131, 0, false}, // 127
+ {doSetEnd, 93 /* ] */, 255, 0, true}, // 128 set-posix
+ {doNOP, 58 /* : */, 131, 0, false}, // 129
+ {doRuleError, 255, 206, 0, false}, // 130
+ {doSetEnd, 93 /* ] */, 255, 0, true}, // 131 set-start
+ {doSetBeginUnion, 91 /* [ */, 123, 148, true}, // 132
+ {doNOP, 92 /* \ */, 191, 0, true}, // 133
+ {doNOP, 45 /* - */, 137, 0, true}, // 134
+ {doNOP, 38 /* & */, 139, 0, true}, // 135
+ {doSetLiteral, 255, 141, 0, true}, // 136
+ {doRuleError, 45 /* - */, 206, 0, false}, // 137 set-start-dash
+ {doSetAddDash, 255, 141, 0, false}, // 138
+ {doRuleError, 38 /* & */, 206, 0, false}, // 139 set-start-amp
+ {doSetAddAmp, 255, 141, 0, false}, // 140
+ {doSetEnd, 93 /* ] */, 255, 0, true}, // 141 set-after-lit
+ {doSetBeginUnion, 91 /* [ */, 123, 148, true}, // 142
+ {doNOP, 45 /* - */, 178, 0, true}, // 143
+ {doNOP, 38 /* & */, 169, 0, true}, // 144
+ {doNOP, 92 /* \ */, 191, 0, true}, // 145
+ {doSetNoCloseError, 253, 206, 0, false}, // 146
+ {doSetLiteral, 255, 141, 0, true}, // 147
+ {doSetEnd, 93 /* ] */, 255, 0, true}, // 148 set-after-set
+ {doSetBeginUnion, 91 /* [ */, 123, 148, true}, // 149
+ {doNOP, 45 /* - */, 171, 0, true}, // 150
+ {doNOP, 38 /* & */, 166, 0, true}, // 151
+ {doNOP, 92 /* \ */, 191, 0, true}, // 152
+ {doSetNoCloseError, 253, 206, 0, false}, // 153
+ {doSetLiteral, 255, 141, 0, true}, // 154
+ {doSetEnd, 93 /* ] */, 255, 0, true}, // 155 set-after-range
+ {doSetBeginUnion, 91 /* [ */, 123, 148, true}, // 156
+ {doNOP, 45 /* - */, 174, 0, true}, // 157
+ {doNOP, 38 /* & */, 176, 0, true}, // 158
+ {doNOP, 92 /* \ */, 191, 0, true}, // 159
+ {doSetNoCloseError, 253, 206, 0, false}, // 160
+ {doSetLiteral, 255, 141, 0, true}, // 161
+ {doSetBeginUnion, 91 /* [ */, 123, 148, true}, // 162 set-after-op
+ {doSetOpError, 93 /* ] */, 206, 0, false}, // 163
+ {doNOP, 92 /* \ */, 191, 0, true}, // 164
+ {doSetLiteral, 255, 141, 0, true}, // 165
+ {doSetBeginIntersection1, 91 /* [ */, 123, 148, true}, // 166 set-set-amp
+ {doSetIntersection2, 38 /* & */, 162, 0, true}, // 167
+ {doSetAddAmp, 255, 141, 0, false}, // 168
+ {doSetIntersection2, 38 /* & */, 162, 0, true}, // 169 set-lit-amp
+ {doSetAddAmp, 255, 141, 0, false}, // 170
+ {doSetBeginDifference1, 91 /* [ */, 123, 148, true}, // 171 set-set-dash
+ {doSetDifference2, 45 /* - */, 162, 0, true}, // 172
+ {doSetAddDash, 255, 141, 0, false}, // 173
+ {doSetDifference2, 45 /* - */, 162, 0, true}, // 174 set-range-dash
+ {doSetAddDash, 255, 141, 0, false}, // 175
+ {doSetIntersection2, 38 /* & */, 162, 0, true}, // 176 set-range-amp
+ {doSetAddAmp, 255, 141, 0, false}, // 177
+ {doSetDifference2, 45 /* - */, 162, 0, true}, // 178 set-lit-dash
+ {doSetAddDash, 91 /* [ */, 141, 0, false}, // 179
+ {doSetAddDash, 93 /* ] */, 141, 0, false}, // 180
+ {doNOP, 92 /* \ */, 183, 0, true}, // 181
+ {doSetRange, 255, 155, 0, true}, // 182
+ {doSetOpError, 115 /* s */, 206, 0, false}, // 183 set-lit-dash-escape
+ {doSetOpError, 83 /* S */, 206, 0, false}, // 184
+ {doSetOpError, 119 /* w */, 206, 0, false}, // 185
+ {doSetOpError, 87 /* W */, 206, 0, false}, // 186
+ {doSetOpError, 100 /* d */, 206, 0, false}, // 187
+ {doSetOpError, 68 /* D */, 206, 0, false}, // 188
+ {doSetNamedRange, 78 /* N */, 155, 0, false}, // 189
+ {doSetRange, 255, 155, 0, true}, // 190
+ {doSetProp, 112 /* p */, 148, 0, false}, // 191 set-escape
+ {doSetProp, 80 /* P */, 148, 0, false}, // 192
+ {doSetNamedChar, 78 /* N */, 141, 0, false}, // 193
+ {doSetBackslashs, 115 /* s */, 155, 0, true}, // 194
+ {doSetBackslashS, 83 /* S */, 155, 0, true}, // 195
+ {doSetBackslashw, 119 /* w */, 155, 0, true}, // 196
+ {doSetBackslashW, 87 /* W */, 155, 0, true}, // 197
+ {doSetBackslashd, 100 /* d */, 155, 0, true}, // 198
+ {doSetBackslashD, 68 /* D */, 155, 0, true}, // 199
+ {doSetBackslashh, 104 /* h */, 155, 0, true}, // 200
+ {doSetBackslashH, 72 /* H */, 155, 0, true}, // 201
+ {doSetBackslashv, 118 /* v */, 155, 0, true}, // 202
+ {doSetBackslashV, 86 /* V */, 155, 0, true}, // 203
+ {doSetLiteralEscaped, 255, 141, 0, true}, // 204
+ {doSetFinish, 255, 14, 0, false}, // 205 set-finish
+ {doExit, 255, 206, 0, true}, // 206 errorDeath
+}
diff --git a/go/mysql/icuregex/debug.go b/go/mysql/icuregex/debug.go
new file mode 100644
index 00000000000..92c43e704d7
--- /dev/null
+++ b/go/mysql/icuregex/debug.go
@@ -0,0 +1,151 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package icuregex
+
+import (
+ "fmt"
+ "io"
+)
+
+func (pat *Pattern) Dump(w io.Writer) {
+ fmt.Fprintf(w, "Original Pattern: \"%s\"\n", pat.pattern)
+ fmt.Fprintf(w, " Min Match Length: %d\n", pat.minMatchLen)
+ fmt.Fprintf(w, " Match Start Type: %v\n", pat.startType)
+ if pat.startType == startString {
+ fmt.Fprintf(w, " Initial match string: \"%s\"\n", string(pat.literalText[pat.initialStringIdx:pat.initialStringIdx+pat.initialStringLen]))
+ } else if pat.startType == startSet {
+ fmt.Fprintf(w, " Match First Chars: %s\n", pat.initialChars.String())
+ } else if pat.startType == startChar {
+ fmt.Fprintf(w, " First char of Match: ")
+ if pat.initialChar > 0x20 {
+ fmt.Fprintf(w, "'%c'\n", pat.initialChar)
+ } else {
+ fmt.Fprintf(w, "%#x\n", pat.initialChar)
+ }
+ }
+
+ fmt.Fprintf(w, "Named Capture Groups:\n")
+ if len(pat.namedCaptureMap) == 0 {
+ fmt.Fprintf(w, " None\n")
+ } else {
+ for name, number := range pat.namedCaptureMap {
+ fmt.Fprintf(w, " %d\t%s\n", number, name)
+ }
+ }
+
+ fmt.Fprintf(w, "\nIndex Binary Type Operand\n-------------------------------------------\n")
+ for idx := range pat.compiledPat {
+ pat.dumpOp(w, idx)
+ }
+ fmt.Fprintf(w, "\n\n")
+}
+
+func (pat *Pattern) dumpOp(w io.Writer, index int) {
+ op := pat.compiledPat[index]
+ val := op.value()
+ opType := op.typ()
+ pinnedType := opType
+ if int(pinnedType) >= len(urxOpcodeNames) {
+ pinnedType = 0
+ }
+
+ fmt.Fprintf(w, "%4d %08x %-15s ", index, op, urxOpcodeNames[pinnedType])
+
+ switch opType {
+ case urxNop,
+ urxDotany,
+ urxDotanyAll,
+ urxFail,
+ urxCaret,
+ urxDollar,
+ urxBackslashG,
+ urxBackslashX,
+ urxEnd,
+ urxDollarM,
+ urxCaretM:
+ // Types with no operand field of interest.
+
+ case urxReservedOp,
+ urxStartCapture,
+ urxEndCapture,
+ urxStateSave,
+ urxJmp,
+ urxJmpSav,
+ urxJmpSavX,
+ urxBackslashB,
+ urxBackslashBu,
+ urxBackslashD,
+ urxBackslashZ,
+ urxStringLen,
+ urxCtrInit,
+ urxCtrInitNg,
+ utxCtrLoop,
+ urxCtrLoopNg,
+ urxRelocOprnd,
+ urxStoSp,
+ urxLdSp,
+ urxBackref,
+ urxStoInpLoc,
+ urxJmpx,
+ urxLaStart,
+ urxLaEnd,
+ urxBackrefI,
+ urxLbStart,
+ urxLbCont,
+ urxLbEnd,
+ urxLbnCount,
+ urxLbnEnd,
+ urxLoopC,
+ urxLoopDotI,
+ urxBackslashH,
+ urxBackslashR,
+ urxBackslashV:
+ // types with an integer operand field.
+ fmt.Fprintf(w, "%d", val)
+
+ case urxOnechar, urcOnecharI:
+ if val < 0x20 {
+ fmt.Fprintf(w, "%#x", val)
+ } else {
+ fmt.Fprintf(w, "'%c'", rune(val))
+ }
+
+ case urxString, urxStringI:
+ lengthOp := pat.compiledPat[index+1]
+ length := lengthOp.value()
+ fmt.Fprintf(w, "%q", string(pat.literalText[val:val+length]))
+
+ case urxSetref, urxLoopSrI:
+ fmt.Fprintf(w, "%s", pat.sets[val].String())
+
+ case urxStaticSetref, urxStatSetrefN:
+ if (val & urxNegSet) != 0 {
+ fmt.Fprintf(w, "NOT ")
+ val &= ^urxNegSet
+ }
+ fmt.Fprintf(w, "%s", staticPropertySets[val].String())
+
+ default:
+ fmt.Fprintf(w, "??????")
+ }
+ fmt.Fprintf(w, "\n")
+}
diff --git a/go/mysql/icuregex/error.go b/go/mysql/icuregex/error.go
new file mode 100644
index 00000000000..39c92399aa9
--- /dev/null
+++ b/go/mysql/icuregex/error.go
@@ -0,0 +1,152 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package icuregex
+
+import (
+ "fmt"
+ "strings"
+)
+
+type CompileError struct {
+ Code CompileErrorCode
+ Line int
+ Offset int
+ Context string
+}
+
+func (e *CompileError) Error() string {
+ var out strings.Builder
+ switch e.Code {
+ case InternalError:
+ out.WriteString("Internal error")
+ case RuleSyntax:
+ out.WriteString("Syntax error")
+ case BadEscapeSequence:
+ out.WriteString("Bad escape sequence")
+ case PropertySyntax:
+ out.WriteString("Property syntax error")
+ case Unimplemented:
+ out.WriteString("Unimplemented")
+ case MismatchedParen:
+ out.WriteString("Mismatched parentheses")
+ case NumberTooBig:
+ out.WriteString("Number too big")
+ case BadInterval:
+ out.WriteString("Bad interval")
+ case MaxLtMin:
+ out.WriteString("Max less than min")
+ case InvalidBackRef:
+ out.WriteString("Invalid back reference")
+ case InvalidFlag:
+ out.WriteString("Invalid flag")
+ case LookBehindLimit:
+ out.WriteString("Look behind limit")
+ case MissingCloseBracket:
+ out.WriteString("Missing closing ]")
+ case InvalidRange:
+ out.WriteString("Invalid range")
+ case PatternTooBig:
+ out.WriteString("Pattern too big")
+ case InvalidCaptureGroupName:
+ out.WriteString("Invalid capture group name")
+ }
+ _, _ = fmt.Fprintf(&out, " in regular expression on line %d, character %d: `%s`", e.Line, e.Offset, e.Context)
+
+ return out.String()
+}
+
+type MatchError struct {
+ Code MatchErrorCode
+ Pattern string
+ Position int
+ Input []rune
+}
+
+const maxMatchInputLength = 20
+
+func (e *MatchError) Error() string {
+ var out strings.Builder
+ switch e.Code {
+ case StackOverflow:
+ out.WriteString("Stack overflow")
+ case TimeOut:
+ out.WriteString("Timeout")
+ case InternalMatchError:
+ out.WriteString("Internal error")
+ }
+
+ input := e.Input
+ if len(input) > maxMatchInputLength {
+ var b []rune
+ start := e.Position - maxMatchInputLength/2
+ if start < 0 {
+ start = 0
+ } else {
+ b = append(b, '.', '.', '.')
+ }
+ end := start + maxMatchInputLength
+ trailing := true
+ if end > len(input) {
+ end = len(input)
+ trailing = false
+ }
+ b = append(b, input[start:end]...)
+ if trailing {
+ b = append(b, '.', '.', '.')
+ }
+ input = b
+ }
+ _, _ = fmt.Fprintf(&out, " for expression `%s` at position %d in: %q", e.Pattern, e.Position, string(input))
+
+ return out.String()
+}
+
+type Code int32
+
+type CompileErrorCode int32
+
+const (
+ InternalError CompileErrorCode = iota + 1 /**< An internal error (bug) was detected. */
+ RuleSyntax /**< Syntax error in regexp pattern. */
+ BadEscapeSequence /**< Unrecognized backslash escape sequence in pattern */
+ PropertySyntax /**< Incorrect Unicode property */
+ Unimplemented /**< Use of regexp feature that is not yet implemented. */
+ MismatchedParen /**< Incorrectly nested parentheses in regexp pattern. */
+ NumberTooBig /**< Decimal number is too large. */
+ BadInterval /**< Error in {min,max} interval */
+ MaxLtMin /**< In {min,max}, max is less than min. */
+ InvalidBackRef /**< Back-reference to a non-existent capture group. */
+ InvalidFlag /**< Invalid value for match mode flags. */
+ LookBehindLimit /**< Look-Behind pattern matches must have a bounded maximum length. */
+ MissingCloseBracket /**< Missing closing bracket on a bracket expression. */
+ InvalidRange /**< In a character range [x-y], x is greater than y. */
+ PatternTooBig /**< Pattern exceeds limits on size or complexity. @stable ICU 55 */
+ InvalidCaptureGroupName /**< Invalid capture group name. @stable ICU 55 */
+)
+
+type MatchErrorCode int32
+
+const (
+ StackOverflow MatchErrorCode = iota /**< Regular expression backtrack stack overflow. */
+ TimeOut /**< Maximum allowed match time exceeded */
+ InternalMatchError /**< Internal error (bug) was detected. */
+)
diff --git a/go/vt/vtgr/controller/error.go b/go/mysql/icuregex/errors/error.go
similarity index 55%
rename from go/vt/vtgr/controller/error.go
rename to go/mysql/icuregex/errors/error.go
index 5613c802524..f03a5157acf 100644
--- a/go/vt/vtgr/controller/error.go
+++ b/go/mysql/icuregex/errors/error.go
@@ -1,5 +1,10 @@
/*
-Copyright 2021 The Vitess Authors.
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,12 +19,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package controller
+package errors
import "errors"
-var (
- errMissingPrimaryTablet = errors.New("no primary tablet available")
- errMissingGroup = errors.New("no mysql group")
- errForceAbortBootstrap = errors.New("force abort bootstrap")
-)
+var ErrIllegalArgument = errors.New("illegal argument")
+var ErrUnsupported = errors.New("unsupported")
diff --git a/go/mysql/icuregex/icu_test.go b/go/mysql/icuregex/icu_test.go
new file mode 100644
index 00000000000..9e9be505df7
--- /dev/null
+++ b/go/mysql/icuregex/icu_test.go
@@ -0,0 +1,415 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package icuregex_test
+
+import (
+ "bufio"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "vitess.io/vitess/go/mysql/icuregex"
+ "vitess.io/vitess/go/mysql/icuregex/internal/pattern"
+)
+
+var ErrSkip = errors.New("ignored test")
+
+type Matcher int8
+
+const (
+ FuncFind Matcher = iota
+ FuncMatches
+ FuncLookingAt
+)
+
+type Expectation int8
+
+const (
+ Unknown Expectation = iota
+ Expected
+ NotExpected
+)
+
+type TestPattern struct {
+ Line string
+ Lineno int
+
+ Pattern string
+ Flags icuregex.RegexpFlag
+ Options struct {
+ MatchFunc Matcher
+ FindCount int
+ MatchOnly bool
+ MustError bool
+ Dump bool
+ HitEnd Expectation
+ RequireEnd Expectation
+ }
+ Input string
+ Groups []TestGroup
+}
+
+type TestGroup struct {
+ Start, End int
+}
+
+var parsePattern = regexp.MustCompile(`<(/?)(r|[0-9]+)>`)
+
+func (tp *TestPattern) parseFlags(line string) (string, error) {
+ for len(line) > 0 {
+ switch line[0] {
+ case '"', '\'', '/':
+ return line, nil
+ case ' ', '\t':
+ case 'i':
+ tp.Flags |= icuregex.CaseInsensitive
+ case 'x':
+ tp.Flags |= icuregex.Comments
+ case 's':
+ tp.Flags |= icuregex.DotAll
+ case 'm':
+ tp.Flags |= icuregex.Multiline
+ case 'e':
+ tp.Flags |= icuregex.ErrorOnUnknownEscapes
+ case 'D':
+ tp.Flags |= icuregex.UnixLines
+ case 'Q':
+ tp.Flags |= icuregex.Literal
+ case '2', '3', '4', '5', '6', '7', '8', '9':
+ tp.Options.FindCount = int(line[0] - '0')
+ case 'G':
+ tp.Options.MatchOnly = true
+ case 'E':
+ tp.Options.MustError = true
+ case 'd':
+ tp.Options.Dump = true
+ case 'L':
+ tp.Options.MatchFunc = FuncLookingAt
+ case 'M':
+ tp.Options.MatchFunc = FuncMatches
+ case 'v':
+ tp.Options.MustError = !icuregex.BreakIteration
+ case 'a', 'b':
+ return "", ErrSkip
+ case 'z':
+ tp.Options.HitEnd = Expected
+ case 'Z':
+ tp.Options.HitEnd = NotExpected
+ case 'y':
+ tp.Options.RequireEnd = Expected
+ case 'Y':
+ tp.Options.RequireEnd = NotExpected
+ default:
+ return "", fmt.Errorf("unexpected modifier '%c'", line[0])
+ }
+ line = line[1:]
+ }
+ return "", io.ErrUnexpectedEOF
+}
+
+func (tp *TestPattern) parseMatch(orig string) error {
+ input, ok := pattern.Unescape(orig)
+ if !ok {
+ return fmt.Errorf("failed to unquote input: %s", orig)
+ }
+
+ var detagged []rune
+ var last int
+
+ m := parsePattern.FindAllStringSubmatchIndex(input, -1)
+ for _, g := range m {
+ detagged = append(detagged, []rune(input[last:g[0]])...)
+ last = g[1]
+
+ closing := input[g[2]:g[3]] == "/"
+ groupNum := input[g[4]:g[5]]
+ if groupNum == "r" {
+ return ErrSkip
+ }
+ num, err := strconv.Atoi(groupNum)
+ if err != nil {
+ return fmt.Errorf("bad group number %q: %w", groupNum, err)
+ }
+
+ if num >= len(tp.Groups) {
+ grp := make([]TestGroup, num+1)
+ for i := range grp {
+ grp[i].Start = -1
+ grp[i].End = -1
+ }
+ copy(grp, tp.Groups)
+ tp.Groups = grp
+ }
+
+ if closing {
+ tp.Groups[num].End = len(detagged)
+ } else {
+ tp.Groups[num].Start = len(detagged)
+ }
+ }
+
+ detagged = append(detagged, []rune(input[last:])...)
+ tp.Input = string(detagged)
+ return nil
+}
+
+func ParseTestFile(t testing.TB, filename string) []TestPattern {
+ f, err := os.Open(filename)
+ require.NoError(t, err)
+
+ defer f.Close()
+ scanner := bufio.NewScanner(f)
+ var lineno int
+ var patterns []TestPattern
+
+ errFunc := func(err error) {
+ if err == ErrSkip {
+ return
+ }
+ t.Errorf("Parse error: %v\n%03d: %s", err, lineno, scanner.Text())
+ }
+
+ for scanner.Scan() {
+ lineno++
+ line := scanner.Text()
+ line = strings.TrimSpace(line)
+
+ if len(line) == 0 || line[0] == '#' {
+ continue
+ }
+
+ var tp TestPattern
+ tp.Line = line
+ tp.Lineno = lineno
+
+ idx := strings.IndexByte(line[1:], line[0])
+
+ tp.Pattern = line[1 : idx+1]
+ line, err = tp.parseFlags(line[idx+2:])
+ if err != nil {
+ errFunc(err)
+ continue
+ }
+
+ idx = strings.IndexByte(line[1:], line[0])
+ err = tp.parseMatch(line[1 : idx+1])
+ if err != nil {
+ errFunc(err)
+ continue
+ }
+
+ patterns = append(patterns, tp)
+ }
+
+ err = scanner.Err()
+ require.NoError(t, err)
+ return patterns
+}
+
+func (tp *TestPattern) fail(t testing.TB, msg string, args ...any) bool {
+ t.Helper()
+ msg = fmt.Sprintf(msg, args...)
+ t.Errorf("%s (in line %d)\nregexp: %s\ninput: %q\noriginal: %s", msg, tp.Lineno, tp.Pattern, tp.Input, tp.Line)
+ return false
+}
+
+func (tp *TestPattern) Test(t testing.TB) bool {
+ re, err := func() (re *icuregex.Pattern, err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ err = fmt.Errorf("PANIC: %v", r)
+ }
+ }()
+ re, err = icuregex.CompileString(tp.Pattern, tp.Flags)
+ return
+ }()
+ if err != nil {
+ if tp.Options.MustError {
+ return true
+ }
+
+ return tp.fail(t, "unexpected parser failure: %v", err)
+ }
+ if tp.Options.MustError {
+ return tp.fail(t, "parse failure expected")
+ }
+
+ matcher := re.Match(tp.Input)
+ var isMatch bool
+ var findCount = tp.Options.FindCount
+ if findCount == 0 {
+ findCount = 1
+ }
+
+ for i := 0; i < findCount; i++ {
+ isMatch, err = func() (bool, error) {
+ defer func() {
+ if r := recover(); r != nil {
+ tp.fail(t, "unexpected match failure: %v", r)
+ }
+ }()
+ switch tp.Options.MatchFunc {
+ case FuncMatches:
+ return matcher.Matches()
+ case FuncLookingAt:
+ return matcher.LookingAt()
+ case FuncFind:
+ return matcher.Find()
+ default:
+ panic("invalid MatchFunc")
+ }
+ }()
+ }
+
+ require.NoError(t, err)
+
+ if !isMatch && len(tp.Groups) > 0 {
+ return tp.fail(t, "Match expected, but none found.")
+ }
+ if isMatch && len(tp.Groups) == 0 {
+ return tp.fail(t, "No match expected, but found one at position %d", matcher.Start())
+ }
+ if tp.Options.MatchOnly {
+ return true
+ }
+
+ for i := 0; i < matcher.GroupCount(); i++ {
+ expectedStart := -1
+ expectedEnd := -1
+
+ if i < len(tp.Groups) {
+ expectedStart = tp.Groups[i].Start
+ expectedEnd = tp.Groups[i].End
+ }
+ if gotStart := matcher.StartForGroup(i); gotStart != expectedStart {
+ return tp.fail(t, "Incorrect start position for group %d. Expected %d, got %d", i, expectedStart, gotStart)
+ }
+ if gotEnd := matcher.EndForGroup(i); gotEnd != expectedEnd {
+ return tp.fail(t, "Incorrect end position for group %d. Expected %d, got %d", i, expectedEnd, gotEnd)
+ }
+ }
+
+ if matcher.GroupCount()+1 < len(tp.Groups) {
+ return tp.fail(t, "Expected %d capture groups, found %d", len(tp.Groups)-1, matcher.GroupCount())
+ }
+
+ if tp.Options.HitEnd == Expected && !matcher.HitEnd() {
+ return tp.fail(t, "HitEnd() returned false. Expected true")
+ }
+ if tp.Options.HitEnd == NotExpected && matcher.HitEnd() {
+ return tp.fail(t, "HitEnd() returned true. Expected false")
+ }
+
+ if tp.Options.RequireEnd == Expected && !matcher.RequireEnd() {
+ return tp.fail(t, "RequireEnd() returned false. Expected true")
+ }
+ if tp.Options.RequireEnd == NotExpected && matcher.RequireEnd() {
+ return tp.fail(t, "RequireEnd() returned true. Expected false")
+ }
+
+ return true
+}
+
+func TestICU(t *testing.T) {
+ pats := ParseTestFile(t, "testdata/regextst.txt")
+
+ var valid int
+
+ for _, p := range pats {
+ if p.Test(t) {
+ valid++
+ }
+ }
+
+ t.Logf("%d/%d (%.02f)", valid, len(pats), float64(valid)/float64(len(pats)))
+}
+
+func TestICUExtended(t *testing.T) {
+ // This tests additional cases that aren't covered in the
+ // copied ICU test suite.
+ pats := ParseTestFile(t, "testdata/regextst_extended.txt")
+
+ var valid int
+
+ for _, p := range pats {
+ if p.Test(t) {
+ valid++
+ }
+ }
+
+ t.Logf("%d/%d (%.02f)", valid, len(pats), float64(valid)/float64(len(pats)))
+}
+
+func TestCornerCases(t *testing.T) {
+ var cases = []struct {
+ Pattern string
+ Input string
+ Flags icuregex.RegexpFlag
+ Match bool
+ }{
+ {`xyz$`, "xyz\n", 0, true},
+ {`a*+`, "abbxx", 0, true},
+ {`(ABC){1,2}+ABC`, "ABCABCABC", 0, true},
+ {`(ABC){2,3}+ABC`, "ABCABCABC", 0, false},
+ {`(abc)*+a`, "abcabcabc", 0, false},
+ {`(abc)*+a`, "abcabcab", 0, true},
+ {`a\N{LATIN SMALL LETTER B}c`, "abc", 0, true},
+ {`a.b`, "a\rb", icuregex.UnixLines, true},
+ {`a.b`, "a\rb", 0, false},
+ {`(?d)abc$`, "abc\r", 0, false},
+ {`[ \b]`, "b", 0, true},
+ {`[abcd-\N{LATIN SMALL LETTER G}]+`, "xyz-abcdefghij-", 0, true},
+ {`[[abcd]&&[ac]]+`, "bacacd", 0, true},
+ }
+
+ for _, tc := range cases {
+ t.Run(tc.Pattern, func(t *testing.T) {
+ _, err := icuregex.CompileString(tc.Pattern, tc.Flags)
+ require.NoError(t, err)
+ })
+ }
+}
+
+func TestOne(t *testing.T) {
+ const Pattern = `\p{CaseIgnorable}`
+ const Input = "foo.bar"
+ const Flags = 0
+
+ re, err := icuregex.CompileString(Pattern, Flags)
+ require.NoError(t, err)
+
+ re.Dump(os.Stderr)
+
+ m := icuregex.NewMatcher(re)
+ m.Dumper(os.Stderr)
+ m.ResetString(Input)
+ found, err := m.Find()
+ require.NoError(t, err)
+ t.Logf("match = %v", found)
+}
diff --git a/go/mysql/icuregex/internal/bytestrie/bytes_trie.go b/go/mysql/icuregex/internal/bytestrie/bytes_trie.go
new file mode 100644
index 00000000000..aff80dc3e69
--- /dev/null
+++ b/go/mysql/icuregex/internal/bytestrie/bytes_trie.go
@@ -0,0 +1,354 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package bytestrie
+
+type BytesTrie struct {
+ pos []byte
+ original []byte
+ remainingMatchLength int32
+}
+
+func New(pos []byte) BytesTrie {
+ return BytesTrie{pos: pos, original: pos, remainingMatchLength: -1}
+}
+
+type result int32
+
+const ( /**
+ * The input unit(s) did not continue a matching string.
+ * Once current()/next() return NO_MATCH,
+ * all further calls to current()/next() will also return NO_MATCH,
+ * until the trie is reset to its original state or to a saved state.
+ * @stable ICU 4.8
+ */
+ noMatch result = iota
+ /**
+ * The input unit(s) continued a matching string
+ * but there is no value for the string so far.
+ * (It is a prefix of a longer string.)
+ * @stable ICU 4.8
+ */
+ noValue
+ /**
+ * The input unit(s) continued a matching string
+ * and there is a value for the string so far.
+ * This value will be returned by getValue().
+ * No further input byte/unit can continue a matching string.
+ * @stable ICU 4.8
+ */
+ finalValue
+ /**
+ * The input unit(s) continued a matching string
+ * and there is a value for the string so far.
+ * This value will be returned by getValue().
+ * Another input byte/unit can continue a matching string.
+ * @stable ICU 4.8
+ */
+ intermediateValue
+)
+
+const (
+ maxBranchLinearSubNodeLength = 5
+
+ // 10..1f: Linear-match node, match 1..16 bytes and continue reading the next node.
+ minLinearMatch = 0x10
+ maxLinearMatchLength = 0x10
+
+ // 20..ff: Variable-length value node.
+ // If odd, the value is final. (Otherwise, intermediate value or jump delta.)
+ // Then shift-right by 1 bit.
+ // The remaining lead byte value indicates the number of following bytes (0..4)
+ // and contains the value's top bits.
+ minValueLead = minLinearMatch + maxLinearMatchLength // 0x20
+ // It is a final value if bit 0 is set.
+ valueIsFinal = 1
+
+ // Compact value: After testing bit 0, shift right by 1 and then use the following thresholds.
+ minOneByteValueLead = minValueLead / 2 // 0x10
+ maxOneByteValue = 0x40 // At least 6 bits in the first byte.
+
+ minTwoByteValueLead = minOneByteValueLead + maxOneByteValue + 1 // 0x51
+ maxTwoByteValue = 0x1aff
+ minThreeByteValueLead = minTwoByteValueLead + (maxTwoByteValue >> 8) + 1 // 0x6c
+ fourByteValueLead = 0x7e
+
+ // Compact delta integers.
+ maxOneByteDelta = 0xbf
+ minTwoByteDeltaLead = maxOneByteDelta + 1 // 0xc0
+ minThreeByteDeltaLead = 0xf0
+ fourByteDeltaLead = 0xfe
+)
+
+func (bt *BytesTrie) ContainsName(name string) bool {
+ result := noValue
+ for _, c := range []byte(name) {
+ if 'A' <= c && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ if c == 0x2d || c == 0x5f || c == 0x20 || (0x09 <= c && c <= 0x0d) {
+ continue
+ }
+ if result&1 == 0 {
+ return false
+ }
+ result = bt.next(int32(c))
+ }
+ return result >= finalValue
+}
+
+func (bt *BytesTrie) next(inByte int32) result {
+ pos := bt.pos
+ if pos == nil {
+ return noMatch
+ }
+ if inByte < 0 {
+ inByte += 0x100
+ }
+ length := bt.remainingMatchLength // Actual remaining match length minus 1.
+ if length >= 0 {
+ match := inByte == int32(pos[0])
+ pos = pos[1:]
+ // Remaining part of a linear-match node.
+ if match {
+ length = length - 1
+ bt.remainingMatchLength = length
+ bt.pos = pos
+ if length < 0 {
+ node := int32(pos[0])
+ if node >= minValueLead {
+ return bt.valueResult(node)
+ }
+ }
+ return noValue
+ }
+ bt.stop()
+ return noMatch
+ }
+ return bt.nextImpl(pos, inByte)
+}
+
+func (bt *BytesTrie) nextImpl(pos []byte, inByte int32) result {
+ for {
+ node := int32(pos[0])
+ pos = pos[1:]
+ if node < minLinearMatch {
+ return bt.branchNext(pos, node, inByte)
+ } else if node < minValueLead {
+ // Match the first of length+1 bytes.
+ length := node - minLinearMatch // Actual match length minus 1.
+ match := inByte == int32(pos[0])
+ pos = pos[1:]
+ if match {
+ length = length - 1
+ bt.remainingMatchLength = length
+ bt.pos = pos
+ if length < 0 {
+ node = int32(pos[0])
+ if node >= minValueLead {
+ return bt.valueResult(node)
+ }
+ }
+ return noValue
+ }
+ // No match.
+ break
+ } else if (node & valueIsFinal) != 0 {
+ // No further matching bytes.
+ break
+ } else {
+ // Skip intermediate value.
+ pos = bt.skipValue2(pos, node)
+ // The next node must not also be a value node.
+ }
+ }
+ bt.stop()
+ return noMatch
+}
+
+func (bt *BytesTrie) stop() {
+ bt.pos = nil
+}
+
+func (bt *BytesTrie) valueResult(node int32) result {
+ return intermediateValue - result(node&valueIsFinal)
+}
+
+func (bt *BytesTrie) branchNext(pos []byte, length int32, inByte int32) result {
+ // Branch according to the current unit.
+ if length == 0 {
+ length = int32(pos[0])
+ pos = pos[1:]
+ }
+ length++
+ // The length of the branch is the number of units to select from.
+ // The data structure encodes a binary search.
+ for length > maxBranchLinearSubNodeLength {
+ p := int32(pos[0])
+ pos = pos[1:]
+ if inByte < p {
+ length >>= 1
+ pos = bt.jumpByDelta(pos)
+ } else {
+ length = length - (length >> 1)
+ pos = bt.skipDelta(pos)
+ }
+ }
+ // Drop down to linear search for the last few bytes.
+ // length>=2 because the loop body above sees length>kMaxBranchLinearSubNodeLength>=3
+ // and divides length by 2.
+ for {
+ p := int32(pos[0])
+ pos = pos[1:]
+ if inByte == p {
+ var result result
+ node := int32(pos[0])
+ if (node & valueIsFinal) != 0 {
+ // Leave the final value for getValue() to read.
+ result = finalValue
+ } else {
+ // Use the non-final value as the jump delta.
+ pos = pos[1:]
+ // int32_t delta=readValue(pos, node>>1);
+ node >>= 1
+ var delta int32
+ if node < minTwoByteValueLead {
+ delta = node - minOneByteValueLead
+ } else if node < minThreeByteValueLead {
+ delta = ((node - minTwoByteValueLead) << 8) | int32(pos[0])
+ pos = pos[1:]
+ } else if node < fourByteValueLead {
+ delta = ((node - minThreeByteValueLead) << 16) | (int32(pos[0]) << 8) | int32(pos[1])
+ pos = pos[2:]
+ } else if node == fourByteValueLead {
+ delta = (int32(pos[0]) << 16) | (int32(pos[1]) << 8) | int32(pos[2])
+ pos = pos[3:]
+ } else {
+ delta = (int32(pos[0]) << 24) | (int32(pos[1]) << 16) | (int32(pos[2]) << 8) | int32(pos[3])
+ pos = pos[4:]
+ }
+ // end readValue()
+ pos = pos[delta:]
+ node = int32(pos[0])
+ if node >= minValueLead {
+ result = bt.valueResult(node)
+ } else {
+ result = noValue
+ }
+ }
+ bt.pos = pos
+ return result
+ }
+ length--
+ pos = bt.skipValue1(pos)
+ if length <= 1 {
+ break
+ }
+ }
+ p := int32(pos[0])
+ pos = pos[1:]
+ if inByte == p {
+ bt.pos = pos
+ node := int32(pos[0])
+ if node >= minValueLead {
+ return bt.valueResult(node)
+ }
+ return noValue
+ }
+ bt.stop()
+ return noMatch
+}
+
+func (bt *BytesTrie) skipValue1(pos []byte) []byte {
+ leadByte := int32(pos[0])
+ return bt.skipValue2(pos[1:], leadByte)
+}
+
+func (bt *BytesTrie) skipValue2(pos []byte, leadByte int32) []byte {
+ if leadByte >= (minTwoByteValueLead << 1) {
+ if leadByte < (minThreeByteValueLead << 1) {
+ pos = pos[1:]
+ } else if leadByte < (fourByteValueLead << 1) {
+ pos = pos[2:]
+ } else {
+ pos = pos[3+((leadByte>>1)&1):]
+ }
+ }
+ return pos
+}
+
+func (bt *BytesTrie) skipDelta(pos []byte) []byte {
+ delta := int32(pos[0])
+ pos = pos[1:]
+ if delta >= minTwoByteDeltaLead {
+ if delta < minThreeByteDeltaLead {
+ pos = pos[1:]
+ } else if delta < fourByteDeltaLead {
+ pos = pos[2:]
+ } else {
+ pos = pos[3+(delta&1):]
+ }
+ }
+ return pos
+}
+
+func (bt *BytesTrie) jumpByDelta(pos []byte) []byte {
+ delta := int32(pos[0])
+ pos = pos[1:]
+ if delta < minTwoByteDeltaLead {
+ // nothing to do
+ } else if delta < minThreeByteDeltaLead {
+ delta = ((delta - minTwoByteDeltaLead) << 8) | int32(pos[0])
+ pos = pos[1:]
+ } else if delta < fourByteDeltaLead {
+ delta = ((delta - minThreeByteDeltaLead) << 16) | (int32(pos[0]) << 8) | int32(pos[1])
+ pos = pos[2:]
+ } else if delta == fourByteDeltaLead {
+ delta = (int32(pos[0]) << 16) | (int32(pos[1]) << 8) | int32(pos[2])
+ pos = pos[3:]
+ } else {
+ delta = (int32(pos[0]) << 24) | (int32(pos[1]) << 16) | (int32(pos[2]) << 8) | int32(pos[3])
+ pos = pos[4:]
+ }
+ return pos[delta:]
+}
+
+func (bt *BytesTrie) GetValue() int32 {
+ pos := bt.pos
+ leadByte := int32(pos[0])
+ return bt.readValue(pos[1:], leadByte>>1)
+}
+
+func (bt *BytesTrie) readValue(pos []byte, leadByte int32) int32 {
+ var value int32
+ if leadByte < minTwoByteValueLead {
+ value = leadByte - minOneByteValueLead
+ } else if leadByte < minThreeByteValueLead {
+ value = ((leadByte - minTwoByteValueLead) << 8) | int32(pos[0])
+ } else if leadByte < fourByteValueLead {
+ value = ((leadByte - minThreeByteValueLead) << 16) | (int32(pos[0]) << 8) | int32(pos[1])
+ } else if leadByte == fourByteValueLead {
+ value = (int32(pos[0]) << 16) | (int32(pos[1]) << 8) | int32(pos[2])
+ } else {
+ value = (int32(pos[0]) << 24) | (int32(pos[1]) << 16) | (int32(pos[2]) << 8) | int32(pos[3])
+ }
+ return value
+}
diff --git a/go/mysql/icuregex/internal/icudata/README.md b/go/mysql/icuregex/internal/icudata/README.md
new file mode 100644
index 00000000000..070633b555e
--- /dev/null
+++ b/go/mysql/icuregex/internal/icudata/README.md
@@ -0,0 +1,46 @@
+# ICU data files
+
+These are files copied from the ICU project that contain various types
+of data, like character properties.
+
+## How to update
+
+Not all data files are immediately available in the source code, but
+need to be built first. This applies to the character / word break
+tables.
+
+### Copy from source data
+
+The `icu4c/source/data/in` directory in the source distribution contains
+the following ICU data files we use:
+
+```
+pnames.icu
+ubidi.icu
+ucase.icu
+unames.icu
+ulayout.icu
+uprops.icu
+nfc.nrm
+nfkc.nrm
+nfkc_cf.nrm
+```
+
+The character and word break tables need to be compiled before they can
+be copied.
+
+In `icu4c/source` run:
+
+```bash
+./configure --with-data-packaging=files
+make
+```
+
+This will compile the character and word break data into a binary file
+that we can use. Once built, the following files we use are available in
+`icu4c/source/data/out/build/icudtl/brkitr`:
+
+```
+char.brk
+word.brk
+```
diff --git a/go/mysql/icuregex/internal/icudata/char.brk b/go/mysql/icuregex/internal/icudata/char.brk
new file mode 100644
index 00000000000..a243ae6580a
Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/char.brk differ
diff --git a/go/mysql/icuregex/internal/icudata/embed.go b/go/mysql/icuregex/internal/icudata/embed.go
new file mode 100644
index 00000000000..12dbd5d0322
--- /dev/null
+++ b/go/mysql/icuregex/internal/icudata/embed.go
@@ -0,0 +1,101 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package icudata
+
+import _ "embed"
+
+// PNames is the list of property names. It is used
+// for example by usage of Unicode property name aliases
+// in regular expressions.
+//
+//go:embed pnames.icu
+var PNames []byte
+
+// UBidi is the list of bidi properties. These are used
+// by Bidi class aliases in regular expressions.
+//
+//go:embed ubidi.icu
+var UBidi []byte
+
+// UCase is the list of case properties. These are used
+// for case folding internally for case insensitive matching.
+//
+//go:embed ucase.icu
+var UCase []byte
+
+// UEmoji is the list of Emoji properties.
+//
+//go:embed uemoji.icu
+var UEmoji []byte
+
+// ULayout is used for property checks against the InPC, InSC
+// and VO properties.
+//
+//go:embed ulayout.icu
+var ULayout []byte
+
+// UNames is used for named character references in regular
+// expressions.
+//
+//go:embed unames.icu
+var UNames []byte
+
+// UProps is used for all the character properties. These
+// are used to retrieve properties of characters for character
+// classes, like letters, whitespace, digits etc.
+//
+//go:embed uprops.icu
+var UProps []byte
+
+// Nfc is the table for character normalization where canonical
+// decomposition is done followed by canonical composition.
+// This is used for property checks of characters about composition.
+//
+//go:embed nfc.nrm
+var Nfc []byte
+
+// Nfkc is the table for character normalization where compatibility
+// decomposition is done followed by canonical composition.
+// This is used for property checks of characters about composition.
+//
+//go:embed nfkc.nrm
+var Nfkc []byte
+
+// NfkcCf is the table for character normalization where compatibility
+// decomposition is done followed by canonical composition with
+// case folding.
+// This is used for property checks of characters about composition.
+//
+//Unused: go:embed nfkc_cf.nrm
+//var NfkcCf []byte
+
+// BrkChar is used for matching against character break
+// characters in regular expressions.
+//
+//Unused: go:embed char.brk
+//var BrkChar []byte
+
+// BrkWord is used for matching against word break
+// characters in regular expressions.
+//
+//Unused: go:embed word.brk
+//var BrkWord []byte
diff --git a/go/mysql/icuregex/internal/icudata/nfc.nrm b/go/mysql/icuregex/internal/icudata/nfc.nrm
new file mode 100644
index 00000000000..2b0e972807e
Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/nfc.nrm differ
diff --git a/go/mysql/icuregex/internal/icudata/nfkc.nrm b/go/mysql/icuregex/internal/icudata/nfkc.nrm
new file mode 100644
index 00000000000..deffa3daa81
Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/nfkc.nrm differ
diff --git a/go/mysql/icuregex/internal/icudata/nfkc_cf.nrm b/go/mysql/icuregex/internal/icudata/nfkc_cf.nrm
new file mode 100644
index 00000000000..3f8d756a0f4
Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/nfkc_cf.nrm differ
diff --git a/go/mysql/icuregex/internal/icudata/pnames.icu b/go/mysql/icuregex/internal/icudata/pnames.icu
new file mode 100644
index 00000000000..c960dc00b49
Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/pnames.icu differ
diff --git a/go/mysql/icuregex/internal/icudata/ubidi.icu b/go/mysql/icuregex/internal/icudata/ubidi.icu
new file mode 100644
index 00000000000..cfde07406cc
Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/ubidi.icu differ
diff --git a/go/mysql/icuregex/internal/icudata/ucase.icu b/go/mysql/icuregex/internal/icudata/ucase.icu
new file mode 100644
index 00000000000..670b0827d55
Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/ucase.icu differ
diff --git a/go/mysql/icuregex/internal/icudata/uemoji.icu b/go/mysql/icuregex/internal/icudata/uemoji.icu
new file mode 100644
index 00000000000..11fdf50ff18
Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/uemoji.icu differ
diff --git a/go/mysql/icuregex/internal/icudata/ulayout.icu b/go/mysql/icuregex/internal/icudata/ulayout.icu
new file mode 100644
index 00000000000..ca6d0013c08
Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/ulayout.icu differ
diff --git a/go/mysql/icuregex/internal/icudata/unames.icu b/go/mysql/icuregex/internal/icudata/unames.icu
new file mode 100644
index 00000000000..e271e78619f
Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/unames.icu differ
diff --git a/go/mysql/icuregex/internal/icudata/uprops.icu b/go/mysql/icuregex/internal/icudata/uprops.icu
new file mode 100644
index 00000000000..0cdd8dea636
Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/uprops.icu differ
diff --git a/go/mysql/icuregex/internal/icudata/word.brk b/go/mysql/icuregex/internal/icudata/word.brk
new file mode 100644
index 00000000000..80460c60128
Binary files /dev/null and b/go/mysql/icuregex/internal/icudata/word.brk differ
diff --git a/go/mysql/icuregex/internal/normalizer/constants.go b/go/mysql/icuregex/internal/normalizer/constants.go
new file mode 100644
index 00000000000..3c2de588952
--- /dev/null
+++ b/go/mysql/icuregex/internal/normalizer/constants.go
@@ -0,0 +1,122 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package normalizer
+
+const (
+ // Fixed norm16 values.
+ minYesYesWithCC = 0xfe02
+ jamoVt = 0xfe00
+ minNormalMaybeYes = 0xfc00
+ jamoL = 2 // offset=1 hasCompBoundaryAfter=false
+ inert = 1 // offset=0 hasCompBoundaryAfter=true
+
+ // norm16 bit 0 is comp-boundary-after.
+ hasCompBoundaryAfter = 1
+ offsetShift = 1
+
+ // For algorithmic one-way mappings, norm16 bits 2..1 indicate the
+ // tccc (0, 1, >1) for quick FCC boundary-after tests.
+ deltaTccc0 = 0
+ deltaTccc1 = 2
+ deltaTcccGt1 = 4
+ deltaTcccMask = 6
+ deltaShift = 3
+
+ maxDelta = 0x40
+)
+
+const (
+ jamoLBase rune = 0x1100 /* "lead" jamo */
+ jamoLEnd rune = 0x1112
+ jamoVBase rune = 0x1161 /* "vowel" jamo */
+ jamoVEnd rune = 0x1175
+ jamoTBase rune = 0x11a7 /* "trail" jamo */
+ jamoTEnd rune = 0x11c2
+
+ hangulBase rune = 0xac00
+ hangulEnd rune = 0xd7a3
+
+ jamoLCount rune = 19
+ jamoVCount rune = 21
+ jamoTCount rune = 28
+
+ hangulCount = jamoLCount * jamoVCount * jamoTCount
+ hangulLimit = hangulBase + hangulCount
+)
+
+const (
+ mappingHasCccLcccWord = 0x80
+ mappingHasRawMapping = 0x40
+ // unused bit 0x20,
+ mappingLengthMask = 0x1f
+)
+
+/**
+ * Constants for normalization modes.
+ * @deprecated ICU 56 Use unorm2.h instead.
+ */
+type Mode int32
+
+const (
+ /** No decomposition/composition. @deprecated ICU 56 Use unorm2.h instead. */
+ NormNone Mode = 1
+ /** Canonical decomposition. @deprecated ICU 56 Use unorm2.h instead. */
+ NormNfd Mode = 2
+ /** Compatibility decomposition. @deprecated ICU 56 Use unorm2.h instead. */
+ NormNfkd Mode = 3
+ /** Canonical decomposition followed by canonical composition. @deprecated ICU 56 Use unorm2.h instead. */
+ NormNfc Mode = 4
+ /** Default normalization. @deprecated ICU 56 Use unorm2.h instead. */
+ NormDefault Mode = NormNfc
+ /** Compatibility decomposition followed by canonical composition. @deprecated ICU 56 Use unorm2.h instead. */
+ NormNfkc Mode = 5
+ /** "Fast C or D" form. @deprecated ICU 56 Use unorm2.h instead. */
+ NormFcd Mode = 6
+)
+
+/**
+ * Result values for normalization quick check functions.
+ * For details see http://www.unicode.org/reports/tr15/#Detecting_Normalization_Forms
+ * @stable ICU 2.0
+ */
+type CheckResult int
+
+const (
+ /**
+ * The input string is not in the normalization form.
+ * @stable ICU 2.0
+ */
+ No CheckResult = iota
+ /**
+ * The input string is in the normalization form.
+ * @stable ICU 2.0
+ */
+ Yes
+ /**
+ * The input string may or may not be in the normalization form.
+ * This value is only returned for composition forms like NFC and FCC,
+ * when a backward-combining character is found for which the surrounding text
+ * would have to be analyzed further.
+ * @stable ICU 2.0
+ */
+ Maybe
+)
diff --git a/go/mysql/icuregex/internal/normalizer/normalizer.go b/go/mysql/icuregex/internal/normalizer/normalizer.go
new file mode 100644
index 00000000000..c13a4878deb
--- /dev/null
+++ b/go/mysql/icuregex/internal/normalizer/normalizer.go
@@ -0,0 +1,482 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package normalizer
+
+import (
+ "errors"
+ "sync"
+
+ "vitess.io/vitess/go/mysql/icuregex/internal/icudata"
+ "vitess.io/vitess/go/mysql/icuregex/internal/udata"
+ "vitess.io/vitess/go/mysql/icuregex/internal/uset"
+ "vitess.io/vitess/go/mysql/icuregex/internal/utf16"
+ "vitess.io/vitess/go/mysql/icuregex/internal/utrie"
+)
+
+type Normalizer struct {
+ minDecompNoCP rune
+ minCompNoMaybeCP rune
+ minLcccCP rune
+
+ // Norm16 value thresholds for quick check combinations and types of extra data.
+ minYesNo uint16
+ minYesNoMappingsOnly uint16
+ minNoNo uint16
+ minNoNoCompBoundaryBefore uint16
+ minNoNoCompNoMaybeCC uint16
+ minNoNoEmpty uint16
+ limitNoNo uint16
+ centerNoNoDelta uint16
+ minMaybeYes uint16
+
+ normTrie *utrie.UcpTrie
+
+ maybeYesCompositions []uint16
+ extraData []uint16 // mappings and/or compositions for yesYes, yesNo & noNo characters
+ smallFCD []uint8 // [0x100] one bit per 32 BMP code points, set if any FCD!=0
+}
+
+var nfc *Normalizer
+var nfkc *Normalizer
+
+var normalizerOnce sync.Once
+
+func loadNormalizer() {
+ normalizerOnce.Do(func() {
+ nfc = &Normalizer{}
+ if err := nfc.load(icudata.Nfc); err != nil {
+ panic(err)
+ }
+
+ nfkc = &Normalizer{}
+ if err := nfkc.load(icudata.Nfkc); err != nil {
+ panic(err)
+ }
+ })
+}
+
+const ixNormTrieOffset = 0
+const ixExtraDataOffset = 1
+const ixSmallFcdOffset = 2
+const ixReserved3Offset = 3
+const ixTotalSize = 7
+
+const ixMinDecompNoCp = 8
+const ixMinCompNoMaybeCp = 9
+
+/** Mappings & compositions in [minYesNo..minYesNoMappingsOnly[. */
+const ixMinYesNo = 10
+
+/** Mappings are comp-normalized. */
+const ixMinNoNo = 11
+const ixLimitNoNo = 12
+const ixMinMaybeYes = 13
+
+/** Mappings only in [minYesNoMappingsOnly..minNoNo[. */
+const ixMinYesNoMappingsOnly = 14
+
+/** Mappings are not comp-normalized but have a comp boundary before. */
+const ixMinNoNoCompBoundaryBefore = 15
+
+/** Mappings do not have a comp boundary before. */
+const ixMinNoNoCompNoMaybeCc = 16
+
+/** Mappings to the empty string. */
+const ixMinNoNoEmpty = 17
+
+const ixMinLcccCp = 18
+const ixCount = 20
+
+func (n *Normalizer) load(data []byte) error {
+ bytes := udata.NewBytes(data)
+
+ err := bytes.ReadHeader(func(info *udata.DataInfo) bool {
+ return info.Size >= 20 &&
+ info.IsBigEndian == 0 &&
+ info.CharsetFamily == 0 &&
+ info.DataFormat[0] == 0x4e && /* dataFormat="unam" */
+ info.DataFormat[1] == 0x72 &&
+ info.DataFormat[2] == 0x6d &&
+ info.DataFormat[3] == 0x32 &&
+ info.FormatVersion[0] == 4
+ })
+ if err != nil {
+ return err
+ }
+
+ indexesLength := int32(bytes.Uint32()) / 4
+ if indexesLength <= ixMinLcccCp {
+ return errors.New("normalizer2 data: not enough indexes")
+ }
+ indexes := make([]int32, indexesLength)
+ indexes[0] = indexesLength * 4
+ for i := int32(1); i < indexesLength; i++ {
+ indexes[i] = bytes.Int32()
+ }
+
+ n.minDecompNoCP = indexes[ixMinDecompNoCp]
+ n.minCompNoMaybeCP = indexes[ixMinCompNoMaybeCp]
+ n.minLcccCP = indexes[ixMinLcccCp]
+
+ n.minYesNo = uint16(indexes[ixMinYesNo])
+ n.minYesNoMappingsOnly = uint16(indexes[ixMinYesNoMappingsOnly])
+ n.minNoNo = uint16(indexes[ixMinNoNo])
+ n.minNoNoCompBoundaryBefore = uint16(indexes[ixMinNoNoCompBoundaryBefore])
+ n.minNoNoCompNoMaybeCC = uint16(indexes[ixMinNoNoCompNoMaybeCc])
+ n.minNoNoEmpty = uint16(indexes[ixMinNoNoEmpty])
+ n.limitNoNo = uint16(indexes[ixLimitNoNo])
+ n.minMaybeYes = uint16(indexes[ixMinMaybeYes])
+
+ n.centerNoNoDelta = uint16(indexes[ixMinMaybeYes]>>deltaShift) - maxDelta - 1
+
+ offset := indexes[ixNormTrieOffset]
+ nextOffset := indexes[ixExtraDataOffset]
+ triePosition := bytes.Position()
+
+ n.normTrie, err = utrie.UcpTrieFromBytes(bytes)
+ if err != nil {
+ return err
+ }
+
+ trieLength := bytes.Position() - triePosition
+ if trieLength > nextOffset-offset {
+ return errors.New("normalizer2 data: not enough bytes for normTrie")
+ }
+ bytes.Skip((nextOffset - offset) - trieLength) // skip padding after trie bytes
+
+ // Read the composition and mapping data.
+ offset = nextOffset
+ nextOffset = indexes[ixSmallFcdOffset]
+ numChars := (nextOffset - offset) / 2
+ if numChars != 0 {
+ n.maybeYesCompositions = bytes.Uint16Slice(numChars)
+ n.extraData = n.maybeYesCompositions[((minNormalMaybeYes - n.minMaybeYes) >> offsetShift):]
+ }
+
+ // smallFCD: new in formatVersion 2
+ n.smallFCD = bytes.Uint8Slice(0x100)
+ return nil
+}
+
+func Nfc() *Normalizer {
+ loadNormalizer()
+ return nfc
+}
+
+func Nfkc() *Normalizer {
+ loadNormalizer()
+ return nfkc
+}
+
+func (n *Normalizer) AddPropertyStarts(u *uset.UnicodeSet) {
+ var start, end rune
+ var value uint32
+ for {
+ end, value = nfc.normTrie.GetRange(start, utrie.UcpMapRangeFixedLeadSurrogates, inert, nil)
+ if end < 0 {
+ break
+ }
+ u.AddRune(start)
+ if start != end && n.isAlgorithmicNoNo(uint16(value)) && (value&deltaTcccMask) > deltaTccc1 {
+ // Range of code points with same-norm16-value algorithmic decompositions.
+ // They might have different non-zero FCD16 values.
+ prevFCD16 := n.GetFCD16(start)
+ for {
+ start++
+ if start > end {
+ break
+ }
+ fcd16 := n.GetFCD16(start)
+ if fcd16 != prevFCD16 {
+ u.AddRune(start)
+ prevFCD16 = fcd16
+ }
+ }
+ }
+ start = end + 1
+ }
+
+ // add Hangul LV syllables and LV+1 because of skippables
+ for c := hangulBase; c < hangulLimit; c += jamoTCount {
+ u.AddRune(c)
+ u.AddRune(c + 1)
+ }
+ u.AddRune(hangulLimit)
+}
+
+func (n *Normalizer) isAlgorithmicNoNo(norm16 uint16) bool {
+ return n.limitNoNo <= norm16 && norm16 < n.minMaybeYes
+}
+
+func (n *Normalizer) GetFCD16(c rune) uint16 {
+ if c < n.minDecompNoCP {
+ return 0
+ } else if c <= 0xffff {
+ if !n.singleLeadMightHaveNonZeroFCD16(c) {
+ return 0
+ }
+ }
+ return n.getFCD16FromNormData(c)
+}
+
+func (n *Normalizer) singleLeadMightHaveNonZeroFCD16(lead rune) bool {
+ // 0<=lead<=0xffff
+ bits := n.smallFCD[lead>>8]
+ if bits == 0 {
+ return false
+ }
+ return ((bits >> ((lead >> 5) & 7)) & 1) != 0
+}
+
+func (n *Normalizer) getFCD16FromNormData(c rune) uint16 {
+ norm16 := n.getNorm16(c)
+ if norm16 >= n.limitNoNo {
+ if norm16 >= minNormalMaybeYes {
+ // combining mark
+ norm16 = uint16(n.getCCFromNormalYesOrMaybe(norm16))
+ return norm16 | (norm16 << 8)
+ } else if norm16 >= n.minMaybeYes {
+ return 0
+ } else { // isDecompNoAlgorithmic(norm16)
+ deltaTrailCC := norm16 & deltaTcccMask
+ if deltaTrailCC <= deltaTccc1 {
+ return deltaTrailCC >> offsetShift
+ }
+ // Maps to an isCompYesAndZeroCC.
+ c = n.mapAlgorithmic(c, norm16)
+ norm16 = n.getRawNorm16(c)
+ }
+ }
+
+ if norm16 <= n.minYesNo || n.isHangulLVT(norm16) {
+ // no decomposition or Hangul syllable, all zeros
+ return 0
+ }
+ // c decomposes, get everything from the variable-length extra data
+ mapping := n.getMapping(norm16)
+ firstUnit := mapping[1]
+ if firstUnit&mappingHasCccLcccWord != 0 {
+ norm16 |= mapping[0] & 0xff00
+ }
+ return norm16
+}
+
+func (n *Normalizer) getMapping(norm16 uint16) []uint16 {
+ return n.extraData[(norm16>>offsetShift)-1:]
+}
+
+func (n *Normalizer) getNorm16(c rune) uint16 {
+ if utf16.IsLead(c) {
+ return inert
+ }
+ return n.getRawNorm16(c)
+}
+
+func (n *Normalizer) getRawNorm16(c rune) uint16 {
+ return uint16(n.normTrie.Get(c))
+}
+
+func (n *Normalizer) getCCFromNormalYesOrMaybe(norm16 uint16) uint8 {
+ return uint8(norm16 >> offsetShift)
+}
+
+func (n *Normalizer) mapAlgorithmic(c rune, norm16 uint16) rune {
+ return c + rune(norm16>>deltaShift) - rune(n.centerNoNoDelta)
+}
+
+func (n *Normalizer) isHangulLV(norm16 uint16) bool {
+ return norm16 == n.minYesNo
+}
+
+func (n *Normalizer) isHangulLVT(norm16 uint16) bool {
+ return norm16 == n.hangulLVT()
+}
+
+func (n *Normalizer) hangulLVT() uint16 {
+ return n.minYesNoMappingsOnly | hasCompBoundaryAfter
+}
+
+func (n *Normalizer) getComposeQuickCheck(c rune) CheckResult {
+ return n.getCompQuickCheck(n.getNorm16(c))
+}
+
+func (n *Normalizer) getDecomposeQuickCheck(c rune) CheckResult {
+ if n.isDecompYes(n.getNorm16(c)) {
+ return Yes
+ }
+ return No
+}
+
+func QuickCheck(c rune, mode Mode) CheckResult {
+ if mode <= NormNone || NormFcd <= mode {
+ return Yes
+ }
+ switch mode {
+ case NormNfc:
+ return Nfc().getComposeQuickCheck(c)
+ case NormNfd:
+ return Nfc().getDecomposeQuickCheck(c)
+ case NormNfkc:
+ return Nfkc().getComposeQuickCheck(c)
+ case NormNfkd:
+ return Nfkc().getDecomposeQuickCheck(c)
+ default:
+ return Maybe
+ }
+}
+
+func IsInert(c rune, mode Mode) bool {
+ switch mode {
+ case NormNfc:
+ return Nfc().isCompInert(c)
+ case NormNfd:
+ return Nfc().isDecompInert(c)
+ case NormNfkc:
+ return Nfkc().isCompInert(c)
+ case NormNfkd:
+ return Nfkc().isDecompInert(c)
+ default:
+ return true
+ }
+}
+
+func (n *Normalizer) isDecompYes(norm16 uint16) bool {
+ return norm16 < n.minYesNo || n.minMaybeYes <= norm16
+}
+
+func (n *Normalizer) getCompQuickCheck(norm16 uint16) CheckResult {
+ if norm16 < n.minNoNo || minYesYesWithCC <= norm16 {
+ return Yes
+ } else if n.minMaybeYes <= norm16 {
+ return Maybe
+ } else {
+ return No
+ }
+}
+
+func (n *Normalizer) isMaybeOrNonZeroCC(norm16 uint16) bool {
+ return norm16 >= n.minMaybeYes
+}
+
+func (n *Normalizer) isDecompNoAlgorithmic(norm16 uint16) bool {
+ return norm16 >= n.limitNoNo
+}
+
+func (n *Normalizer) IsCompNo(c rune) bool {
+ norm16 := n.getNorm16(c)
+ return n.minNoNo <= norm16 && norm16 < n.minMaybeYes
+}
+
+func (n *Normalizer) Decompose(c rune) []rune {
+ norm16 := n.getNorm16(c)
+ if c < n.minDecompNoCP || n.isMaybeOrNonZeroCC(norm16) {
+ // c does not decompose
+ return nil
+ }
+ var decomp []rune
+
+ if n.isDecompNoAlgorithmic(norm16) {
+ // Maps to an isCompYesAndZeroCC.
+ c = n.mapAlgorithmic(c, norm16)
+ decomp = append(decomp, c)
+ // The mapping might decompose further.
+ norm16 = n.getRawNorm16(c)
+ }
+ if norm16 < n.minYesNo {
+ return decomp
+ } else if n.isHangulLV(norm16) || n.isHangulLVT(norm16) {
+ // Hangul syllable: decompose algorithmically
+ parts := hangulDecompose(c)
+ for len(parts) > 0 {
+ c = rune(parts[0])
+ decomp = append(decomp, c)
+ parts = parts[1:]
+ }
+ return decomp
+ }
+ // c decomposes, get everything from the variable-length extra data
+ mapping := n.getMapping(norm16)
+ length := mapping[1] & mappingLengthMask
+ mapping = mapping[2 : 2+length]
+
+ for len(mapping) > 0 {
+ c, mapping = utf16.NextUnsafe(mapping)
+ decomp = append(decomp, c)
+ }
+
+ return decomp
+}
+
+func hangulDecompose(c rune) []uint16 {
+ c -= hangulBase
+ c2 := c % jamoTCount
+ c /= jamoTCount
+ var buffer []uint16
+ buffer = append(buffer, uint16(jamoLBase+c/jamoVCount))
+ buffer = append(buffer, uint16(jamoVBase+c%jamoVCount))
+ if c2 != 0 {
+ buffer = append(buffer, uint16(jamoTBase+c2))
+ }
+ return buffer
+}
+
+func (n *Normalizer) isCompInert(c rune) bool {
+ norm16 := n.getNorm16(c)
+ return n.isCompYesAndZeroCC(norm16) && (norm16&hasCompBoundaryAfter) != 0
+}
+
+func (n *Normalizer) isDecompInert(c rune) bool {
+ return n.isDecompYesAndZeroCC(n.getNorm16(c))
+}
+
+func (n *Normalizer) isCompYesAndZeroCC(norm16 uint16) bool {
+ return norm16 < n.minNoNo
+}
+
+func (n *Normalizer) isDecompYesAndZeroCC(norm16 uint16) bool {
+ return norm16 < n.minYesNo ||
+ norm16 == jamoVt ||
+ (n.minMaybeYes <= norm16 && norm16 <= minNormalMaybeYes)
+}
+
+func (n *Normalizer) CombiningClass(c rune) uint8 {
+ return n.getCC(n.getNorm16(c))
+}
+
+func (n *Normalizer) getCC(norm16 uint16) uint8 {
+ if norm16 >= minNormalMaybeYes {
+ return n.getCCFromNormalYesOrMaybe(norm16)
+ }
+ if norm16 < n.minNoNo || n.limitNoNo <= norm16 {
+ return 0
+ }
+ return n.getCCFromNoNo(norm16)
+
+}
+
+func (n *Normalizer) getCCFromNoNo(norm16 uint16) uint8 {
+ mapping := n.getMapping(norm16)
+ if mapping[1]&mappingHasCccLcccWord != 0 {
+ return uint8(mapping[0])
+ }
+ return 0
+}
diff --git a/go/mysql/icuregex/internal/pattern/unescape.go b/go/mysql/icuregex/internal/pattern/unescape.go
new file mode 100644
index 00000000000..e4a554ff612
--- /dev/null
+++ b/go/mysql/icuregex/internal/pattern/unescape.go
@@ -0,0 +1,314 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pattern
+
+import (
+ "strings"
+ "unicode/utf8"
+
+ "vitess.io/vitess/go/mysql/icuregex/internal/utf16"
+)
+
+/* Convert one octal digit to a numeric value 0..7, or -1 on failure */
+func _digit8(c rune) rune {
+ if c >= 0x0030 && c <= 0x0037 {
+ return (c - 0x0030)
+ }
+ return -1
+}
+
+/* Convert one hex digit to a numeric value 0..F, or -1 on failure */
+func _digit16(c rune) rune {
+ if c >= 0x0030 && c <= 0x0039 {
+ return (c - 0x0030)
+ }
+ if c >= 0x0041 && c <= 0x0046 {
+ return (c - (0x0041 - 10))
+ }
+ if c >= 0x0061 && c <= 0x0066 {
+ return (c - (0x0061 - 10))
+ }
+ return -1
+}
+
+var unscapeMap = []byte{
+ /*" 0x22, 0x22 */
+ /*' 0x27, 0x27 */
+ /*? 0x3F, 0x3F */
+ /*\ 0x5C, 0x5C */
+ /*a*/ 0x61, 0x07,
+ /*b*/ 0x62, 0x08,
+ /*e*/ 0x65, 0x1b,
+ /*f*/ 0x66, 0x0c,
+ /*n*/ 0x6E, 0x0a,
+ /*r*/ 0x72, 0x0d,
+ /*t*/ 0x74, 0x09,
+ /*v*/ 0x76, 0x0b,
+}
+
+func Unescape(str string) (string, bool) {
+ var idx int
+ if idx = strings.IndexByte(str, '\\'); idx < 0 {
+ return str, true
+ }
+
+ var result strings.Builder
+ result.WriteString(str[:idx])
+ str = str[idx:]
+
+ for len(str) > 0 {
+ if str[0] == '\\' {
+ var r rune
+ r, str = UnescapeAt(str[1:])
+ if r < 0 {
+ return "", false
+ }
+ result.WriteRune(r)
+ } else {
+ result.WriteByte(str[0])
+ str = str[1:]
+ }
+ }
+ return result.String(), true
+}
+
+func UnescapeAt(str string) (rune, string) {
+ c, w := utf8.DecodeRuneInString(str)
+ str = str[w:]
+ if c == utf8.RuneError && (w == 0 || w == 1) {
+ return -1, str
+ }
+
+ var minDig, maxDig, n int
+ var braces bool
+ var bitsPerDigit = 4
+ var result rune
+
+ switch c {
+ case 'u':
+ minDig = 4
+ maxDig = 4
+ case 'U':
+ minDig = 8
+ maxDig = 8
+ case 'x':
+ minDig = 1
+ if len(str) > 0 && str[0] == '{' {
+ str = str[1:]
+ braces = true
+ maxDig = 8
+ } else {
+ maxDig = 2
+ }
+ default:
+ if dig := _digit8(c); dig >= 0 {
+ minDig = 1
+ maxDig = 4
+ n = 1
+ bitsPerDigit = 3
+ result = dig
+ }
+ }
+
+ if minDig != 0 {
+ for n < maxDig && len(str) > 0 {
+ c, w = utf8.DecodeRuneInString(str)
+ if c == utf8.RuneError && w == 1 {
+ return -1, str
+ }
+
+ var dig rune
+ if bitsPerDigit == 3 {
+ dig = _digit8(c)
+ } else {
+ dig = _digit16(c)
+ }
+ if dig < 0 {
+ break
+ }
+ result = (result << bitsPerDigit) | dig
+ str = str[w:]
+ n++
+ }
+ if n < minDig {
+ return -1, str
+ }
+ if braces {
+ if c != '}' {
+ return -1, str
+ }
+ str = str[1:]
+ }
+ if result < 0 || result > utf8.MaxRune {
+ return -1, str
+ }
+ if len(str) > 0 && utf16.IsLead(result) {
+ c, w = utf8.DecodeRuneInString(str)
+ if c == utf8.RuneError && (w == 0 || w == 1) {
+ return -1, str
+ }
+ if c == '\\' {
+ var str2 string
+ c, str2 = UnescapeAt(str[1:])
+ if utf16.IsTrail(c) {
+ result = utf16.DecodeRune(result, c)
+ str = str2
+ }
+ }
+ }
+ return result, str
+ }
+
+ if c < utf8.RuneSelf {
+ for i := 0; i < len(unscapeMap); i += 2 {
+ if byte(c) == unscapeMap[i] {
+ return rune(unscapeMap[i+1]), str
+ }
+ if byte(c) < unscapeMap[i] {
+ break
+ }
+ }
+ }
+
+ if c == 'c' && len(str) > 0 {
+ c, w = utf8.DecodeRuneInString(str)
+ if c == utf8.RuneError && (w == 0 || w == 1) {
+ return -1, str
+ }
+ return 0x1f & c, str[w:]
+ }
+
+ return c, str
+}
+
+func UnescapeAtRunes(str []rune) (rune, []rune) {
+ if len(str) == 0 {
+ return -1, str
+ }
+
+ c := str[0]
+ str = str[1:]
+ if c == utf8.RuneError {
+ return -1, str
+ }
+
+ var minDig, maxDig, n int
+ var braces bool
+ var bitsPerDigit = 4
+ var result rune
+
+ switch c {
+ case 'u':
+ minDig = 4
+ maxDig = 4
+ case 'U':
+ minDig = 8
+ maxDig = 8
+ case 'x':
+ minDig = 1
+ if len(str) > 0 && str[0] == '{' {
+ str = str[1:]
+ braces = true
+ maxDig = 8
+ } else {
+ maxDig = 2
+ }
+ default:
+ if dig := _digit8(c); dig >= 0 {
+ minDig = 1
+ maxDig = 4
+ n = 1
+ bitsPerDigit = 3
+ result = dig
+ }
+ }
+
+ if minDig != 0 {
+ for n < maxDig && len(str) > 0 {
+ c = str[0]
+ if c == utf8.RuneError {
+ return -1, str
+ }
+
+ var dig rune
+ if bitsPerDigit == 3 {
+ dig = _digit8(c)
+ } else {
+ dig = _digit16(c)
+ }
+ if dig < 0 {
+ break
+ }
+ result = (result << bitsPerDigit) | dig
+ str = str[1:]
+ n++
+ }
+ if n < minDig {
+ return -1, str
+ }
+ if braces {
+ if c != '}' {
+ return -1, str
+ }
+ str = str[1:]
+ }
+ if result < 0 || result > utf8.MaxRune {
+ return -1, str
+ }
+ if len(str) > 0 && utf16.IsLead(result) {
+ c = str[0]
+ if c == utf8.RuneError {
+ return -1, str
+ }
+ if c == '\\' {
+ var str2 []rune
+ c, str2 = UnescapeAtRunes(str[1:])
+ if utf16.IsTrail(c) {
+ result = utf16.DecodeRune(result, c)
+ str = str2
+ }
+ }
+ }
+ return result, str
+ }
+
+ if c < utf8.RuneSelf {
+ for i := 0; i < len(unscapeMap); i += 2 {
+ if byte(c) == unscapeMap[i] {
+ return rune(unscapeMap[i+1]), str
+ }
+ if byte(c) < unscapeMap[i] {
+ break
+ }
+ }
+ }
+
+ if c == 'c' && len(str) > 0 {
+ c = str[0]
+ if c == utf8.RuneError {
+ return -1, str
+ }
+ return 0x1f & c, str[1:]
+ }
+
+ return c, str
+}
diff --git a/go/mysql/icuregex/internal/pattern/unescape_test.go b/go/mysql/icuregex/internal/pattern/unescape_test.go
new file mode 100644
index 00000000000..0bb76c2bfdb
--- /dev/null
+++ b/go/mysql/icuregex/internal/pattern/unescape_test.go
@@ -0,0 +1,48 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pattern
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestUnescapeAt(t *testing.T) {
+ r, str := UnescapeAt("ud800\\ud800\\udc00")
+ assert.Equal(t, rune(0xd800), r)
+ assert.Equal(t, "\\ud800\\udc00", str)
+
+ r, str = UnescapeAt(str[1:])
+ assert.Equal(t, rune(0x00010000), r)
+ assert.Equal(t, "", str)
+}
+
+func TestUnescapeAtRunes(t *testing.T) {
+ r, str := UnescapeAtRunes([]rune("ud800\\ud800\\udc00"))
+ assert.Equal(t, rune(0xd800), r)
+ assert.Equal(t, []rune("\\ud800\\udc00"), str)
+
+ r, str = UnescapeAtRunes(str[1:])
+ assert.Equal(t, rune(0x00010000), r)
+ assert.Equal(t, []rune(""), str)
+}
diff --git a/go/mysql/icuregex/internal/pattern/utils.go b/go/mysql/icuregex/internal/pattern/utils.go
new file mode 100644
index 00000000000..4dcf55e9f42
--- /dev/null
+++ b/go/mysql/icuregex/internal/pattern/utils.go
@@ -0,0 +1,111 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pattern
+
+import (
+ "strings"
+ "unicode/utf8"
+)
+
+var patternPropsLatin1 = [256]uint8{
+ // WS: 9..D
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 5, 5, 5, 5, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ // WS: 20 Syntax: 21..2F
+ 5, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
+ // Syntax: 3A..40
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
+ 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ // Syntax: 5B..5E
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0,
+ // Syntax: 60
+ 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ // Syntax: 7B..7E
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0,
+ // WS: 85
+ 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ // Syntax: A1..A7, A9, AB, AC, AE
+ 0, 3, 3, 3, 3, 3, 3, 3, 0, 3, 0, 3, 3, 0, 3, 0,
+ // Syntax: B0, B1, B6, BB, BF
+ 3, 3, 0, 0, 0, 0, 3, 0, 0, 0, 0, 3, 0, 0, 0, 3,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ // Syntax: D7
+ 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ // Syntax: F7
+ 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0,
+}
+
+func IsWhitespace(c rune) bool {
+ if c < 0 {
+ return false
+ } else if c <= 0xff {
+ return (patternPropsLatin1[c]>>2)&1 != 0
+ } else if 0x200e <= c && c <= 0x2029 {
+ return c <= 0x200f || 0x2028 <= c
+ } else {
+ return false
+ }
+}
+
+func SkipWhitespace(str string) string {
+ for {
+ r, w := utf8.DecodeRuneInString(str)
+ if r == utf8.RuneError && (w == 0 || w == 1) {
+ return str[w:]
+ }
+ if !IsWhitespace(r) {
+ return str
+ }
+ str = str[w:]
+ }
+}
+
+func IsUnprintable(c rune) bool {
+ return !(c >= 0x20 && c <= 0x7E)
+}
+
+// "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+var digits = [...]byte{
+ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
+ 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
+ 75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
+ 85, 86, 87, 88, 89, 90,
+}
+
+func EscapeUnprintable(w *strings.Builder, c rune) {
+ w.WriteByte('\\')
+ if (c & ^0xFFFF) != 0 {
+ w.WriteByte('U')
+ w.WriteByte(digits[0xF&(c>>28)])
+ w.WriteByte(digits[0xF&(c>>24)])
+ w.WriteByte(digits[0xF&(c>>20)])
+ w.WriteByte(digits[0xF&(c>>16)])
+ } else {
+ w.WriteByte('u')
+ }
+ w.WriteByte(digits[0xF&(c>>12)])
+ w.WriteByte(digits[0xF&(c>>8)])
+ w.WriteByte(digits[0xF&(c>>4)])
+ w.WriteByte(digits[0xF&c])
+}
diff --git a/go/mysql/icuregex/internal/ubidi/loader.go b/go/mysql/icuregex/internal/ubidi/loader.go
new file mode 100644
index 00000000000..e30ca402f81
--- /dev/null
+++ b/go/mysql/icuregex/internal/ubidi/loader.go
@@ -0,0 +1,125 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ubidi
+
+import (
+ "errors"
+ "sync"
+
+ "vitess.io/vitess/go/mysql/icuregex/internal/icudata"
+ "vitess.io/vitess/go/mysql/icuregex/internal/udata"
+ "vitess.io/vitess/go/mysql/icuregex/internal/utrie"
+)
+
+var ubidiOnce sync.Once
+var ubidi struct {
+ indexes []int32
+ trie *utrie.UTrie2
+ mirrors []uint32
+ jg []uint8
+ jg2 []uint8
+}
+
+func indexes() []int32 {
+ loadUBidi()
+ return ubidi.indexes
+}
+
+func trie() *utrie.UTrie2 {
+ loadUBidi()
+ return ubidi.trie
+}
+
+func mirrors() []uint32 {
+ loadUBidi()
+ return ubidi.mirrors
+}
+
+func jg() []uint8 {
+ loadUBidi()
+ return ubidi.jg
+}
+
+func jg2() []uint8 {
+ loadUBidi()
+ return ubidi.jg2
+}
+
+func loadUBidi() {
+ ubidiOnce.Do(func() {
+ b := udata.NewBytes(icudata.UBidi)
+ if err := readData(b); err != nil {
+ panic(err)
+ }
+ })
+}
+
+func readData(bytes *udata.Bytes) error {
+ err := bytes.ReadHeader(func(info *udata.DataInfo) bool {
+ return info.DataFormat[0] == 0x42 &&
+ info.DataFormat[1] == 0x69 &&
+ info.DataFormat[2] == 0x44 &&
+ info.DataFormat[3] == 0x69 &&
+ info.FormatVersion[0] == 2
+ })
+ if err != nil {
+ return err
+ }
+
+ count := int32(bytes.Uint32())
+ if count < ixTop {
+ return errors.New("indexes[0] too small in ucase.icu")
+ }
+
+ ubidi.indexes = make([]int32, count)
+ ubidi.indexes[0] = count
+
+ for i := int32(1); i < count; i++ {
+ ubidi.indexes[i] = int32(bytes.Uint32())
+ }
+
+ ubidi.trie, err = utrie.UTrie2FromBytes(bytes)
+ if err != nil {
+ return err
+ }
+
+ expectedTrieLength := ubidi.indexes[ixTrieSize]
+ trieLength := ubidi.trie.SerializedLength()
+
+ if trieLength > expectedTrieLength {
+ return errors.New("ucase.icu: not enough bytes for the trie")
+ }
+
+ bytes.Skip(expectedTrieLength - trieLength)
+
+ if n := ubidi.indexes[ixMirrorLength]; n > 0 {
+ ubidi.mirrors = bytes.Uint32Slice(n)
+ }
+ if n := ubidi.indexes[ixJgLimit] - ubidi.indexes[ixJgStart]; n > 0 {
+ ubidi.jg = bytes.Uint8Slice(n)
+ }
+ if n := ubidi.indexes[ixJgLimit2] - ubidi.indexes[ixJgStart2]; n > 0 {
+ ubidi.jg2 = bytes.Uint8Slice(n)
+ }
+
+ return nil
+}
diff --git a/go/mysql/icuregex/internal/ubidi/ubidi.go b/go/mysql/icuregex/internal/ubidi/ubidi.go
new file mode 100644
index 00000000000..79482dfbc8d
--- /dev/null
+++ b/go/mysql/icuregex/internal/ubidi/ubidi.go
@@ -0,0 +1,390 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ubidi
+
+const (
+ ixIndexTop = iota
+ ixLength
+ ixTrieSize
+ ixMirrorLength
+
+ ixJgStart
+ ixJgLimit
+ ixJgStart2 /* new in format version 2.2, ICU 54 */
+ ixJgLimit2
+
+ maxValuesIndex
+ ixTop
+)
+
+const (
+ /* UBIDI_CLASS_SHIFT=0, */ /* bidi class: 5 bits (4..0) */
+ jtShift = 5 /* joining type: 3 bits (7..5) */
+
+ bptShift = 8 /* Bidi_Paired_Bracket_Type(bpt): 2 bits (9..8) */
+
+ joinControlShift = 10
+ bidiControlShift = 11
+
+ isMirroredShift = 12 /* 'is mirrored' */
+)
+
+/**
+ * Bidi Paired Bracket Type constants.
+ *
+ * @see UCHAR_BIDI_PAIRED_BRACKET_TYPE
+ * @stable ICU 52
+ */
+type UPairedBracketType int32
+
+/*
+ * Note: UBidiPairedBracketType constants are parsed by preparseucd.py.
+ * It matches lines like
+ * U_BPT_
+ */
+const (
+ /** Not a paired bracket. @stable ICU 52 */
+ BptNone UPairedBracketType = iota
+ /** Open paired bracket. @stable ICU 52 */
+ BptOpen
+ /** Close paired bracket. @stable ICU 52 */
+ BptClose
+)
+
+const classMask = 0x0000001f
+const jtMask = 0x000000e0
+const bptMask = 0x00000300
+
+/**
+ * Joining Type constants.
+ *
+ * @see UCHAR_JOINING_TYPE
+ * @stable ICU 2.2
+ */
+type JoiningType int32
+
+/*
+ * Note: UJoiningType constants are parsed by preparseucd.py.
+ * It matches lines like
+ * U_JT_
+ */
+const (
+ JtNonJoining JoiningType = iota /*[U]*/
+ JtJoinCausing /*[C]*/
+ JtDualJoining /*[D]*/
+ JtLeftJoining /*[L]*/
+ JtRightJoining /*[R]*/
+ JtTransparent /*[T]*/
+)
+
+/**
+ * Joining Group constants.
+ *
+ * @see UCHAR_JOINING_GROUP
+ * @stable ICU 2.2
+ */
+type JoiningGroup int32
+
+/*
+ * Note: UJoiningGroup constants are parsed by preparseucd.py.
+ * It matches lines like
+ * U_JG_
+ */
+const (
+ JgNoJoiningGroup JoiningGroup = iota
+ JgAin
+ JgAlaph
+ JgAlef
+ JgBeh
+ JgBeth
+ JgDal
+ JgDalathRish
+ JgE
+ JgFeh
+ JgFinalSemkath
+ JgGaf
+ JgGamal
+ JgHah
+ JgTehMarbutaGoal /**< @stable ICU 4.6 */
+ JgHe
+ JgHeh
+ JgHehGoal
+ JgHeth
+ JgKaf
+ JgKaph
+ JgKnottedHeh
+ JgLam
+ JgLamadh
+ JgMeem
+ JgMim
+ JgNoon
+ JgNun
+ JgPe
+ JgQaf
+ JgQaph
+ JgReh
+ JgReversedPe
+ JgSad
+ JgSadhe
+ JgSeen
+ JgSemkath
+ JgShin
+ JgSwashKaf
+ JgSyriacWaw
+ JgTah
+ JgTaw
+ JgTehMarbuta
+ JgTeth
+ JgWaw
+ JgYeh
+ JgYehBarree
+ JgYehWithTail
+ JgYudh
+ JgYudhHe
+ JgZain
+ JgFe /**< @stable ICU 2.6 */
+ JgKhaph /**< @stable ICU 2.6 */
+ JgZhain /**< @stable ICU 2.6 */
+ JgBurushashkiYehBarree /**< @stable ICU 4.0 */
+ JgFarsiYeh /**< @stable ICU 4.4 */
+ JgNya /**< @stable ICU 4.4 */
+ JgRohingyaYeh /**< @stable ICU 49 */
+ JgManichaeanAleph /**< @stable ICU 54 */
+ JgManichaeanAyin /**< @stable ICU 54 */
+ JgManichaeanBeth /**< @stable ICU 54 */
+ JgManichaeanDaleth /**< @stable ICU 54 */
+ JgManichaeanDhamedh /**< @stable ICU 54 */
+ JgManichaeanFive /**< @stable ICU 54 */
+ JgManichaeanGimel /**< @stable ICU 54 */
+ JgManichaeanHeth /**< @stable ICU 54 */
+ JgManichaeanHundred /**< @stable ICU 54 */
+ JgManichaeanKaph /**< @stable ICU 54 */
+ JgManichaeanLamedh /**< @stable ICU 54 */
+ JgManichaeanMem /**< @stable ICU 54 */
+ JgManichaeanNun /**< @stable ICU 54 */
+ JgManichaeanOne /**< @stable ICU 54 */
+ JgManichaeanPe /**< @stable ICU 54 */
+ JgManichaeanQoph /**< @stable ICU 54 */
+ JgManichaeanResh /**< @stable ICU 54 */
+ JgManichaeanSadhe /**< @stable ICU 54 */
+ JgManichaeanSamekh /**< @stable ICU 54 */
+ JgManichaeanTaw /**< @stable ICU 54 */
+ JgManichaeanTen /**< @stable ICU 54 */
+ JgManichaeanTeth /**< @stable ICU 54 */
+ JgManichaeanThamedh /**< @stable ICU 54 */
+ JgManichaeanTwenty /**< @stable ICU 54 */
+ JgManichaeanWaw /**< @stable ICU 54 */
+ JgManichaeanYodh /**< @stable ICU 54 */
+ JgManichaeanZayin /**< @stable ICU 54 */
+ JgStraightWaw /**< @stable ICU 54 */
+ JgAfricanFeh /**< @stable ICU 58 */
+ JgAfricanNoon /**< @stable ICU 58 */
+ JgAfricanQaf /**< @stable ICU 58 */
+
+ JgMalayalamBha /**< @stable ICU 60 */
+ JgMalayalamJa /**< @stable ICU 60 */
+ JgMalayalamLla /**< @stable ICU 60 */
+ JgMalayalamLlla /**< @stable ICU 60 */
+ JgMalayalamNga /**< @stable ICU 60 */
+ JgMalayalamNna /**< @stable ICU 60 */
+ JgMalayalamNnna /**< @stable ICU 60 */
+ JgMalayalamNya /**< @stable ICU 60 */
+ JgMalayalamRa /**< @stable ICU 60 */
+ JgMalayalamSsa /**< @stable ICU 60 */
+ JgMalayalamTta /**< @stable ICU 60 */
+
+ JgHanafiRohingyaKinnaYa /**< @stable ICU 62 */
+ JgHanafiRohingyaPa /**< @stable ICU 62 */
+
+ JgThinYeh /**< @stable ICU 70 */
+ JgVerticalTail /**< @stable ICU 70 */
+)
+
+/**
+ * This specifies the language directional property of a character set.
+ * @stable ICU 2.0
+ */
+type CharDirection int32
+
+/*
+ * Note: UCharDirection constants and their API comments are parsed by preparseucd.py.
+ * It matches pairs of lines like
+ * / ** comment... * /
+ * U_<[A-Z_]+> = ,
+ */
+
+const (
+ /** L @stable ICU 2.0 */
+ LeftToRight CharDirection = 0
+ /** R @stable ICU 2.0 */
+ RightToLeft CharDirection = 1
+ /** EN @stable ICU 2.0 */
+ EuropeanNumber CharDirection = 2
+ /** ES @stable ICU 2.0 */
+ EuropeanNumberSeparator CharDirection = 3
+ /** ET @stable ICU 2.0 */
+ EuropeanNumberTerminator CharDirection = 4
+ /** AN @stable ICU 2.0 */
+ ArabicNumber CharDirection = 5
+ /** CS @stable ICU 2.0 */
+ CommonNumberSeparator CharDirection = 6
+ /** B @stable ICU 2.0 */
+ BlockSeparator CharDirection = 7
+ /** S @stable ICU 2.0 */
+ SegmentSeparator CharDirection = 8
+ /** WS @stable ICU 2.0 */
+ WhiteSpaceNeutral CharDirection = 9
+ /** ON @stable ICU 2.0 */
+ OtherNeutral CharDirection = 10
+ /** LRE @stable ICU 2.0 */
+ LeftToRightEmbedding CharDirection = 11
+ /** LRO @stable ICU 2.0 */
+ LeftToRightOverride CharDirection = 12
+ /** AL @stable ICU 2.0 */
+ RightToLeftArabic CharDirection = 13
+ /** RLE @stable ICU 2.0 */
+ RightToLeftEmbedding CharDirection = 14
+ /** RLO @stable ICU 2.0 */
+ RightToLeftOverride CharDirection = 15
+ /** PDF @stable ICU 2.0 */
+ PopDirectionalFormat CharDirection = 16
+ /** NSM @stable ICU 2.0 */
+ DirNonSpacingMark CharDirection = 17
+ /** BN @stable ICU 2.0 */
+ BoundaryNeutral CharDirection = 18
+ /** FSI @stable ICU 52 */
+ StrongIsolate CharDirection = 19
+ /** LRI @stable ICU 52 */
+ LeftToRightIsolate CharDirection = 20
+ /** RLI @stable ICU 52 */
+ RightToLeftIsolate CharDirection = 21
+ /** PDI @stable ICU 52 */
+ PopDirectionalIsolate CharDirection = 22
+)
+
+type propertySet interface {
+ AddRune(ch rune)
+ AddRuneRange(from rune, to rune)
+}
+
+func AddPropertyStarts(sa propertySet) {
+ /* add the start code point of each same-value range of the trie */
+ trie().Enum(nil, func(start, _ rune, _ uint32) bool {
+ sa.AddRune(start)
+ return true
+ })
+
+ idxs := indexes()
+ mrs := mirrors()
+ /* add the code points from the bidi mirroring table */
+ length := idxs[ixMirrorLength]
+ for i := int32(0); i < length; i++ {
+ c := mirrorCodePoint(rune(mrs[i]))
+ sa.AddRuneRange(c, c+1)
+ }
+
+ /* add the code points from the Joining_Group array where the value changes */
+ start := idxs[ixJgStart]
+ limit := idxs[ixJgLimit]
+ jgArray := jg()
+ for {
+ prev := uint8(0)
+ for start < limit {
+ jg := jgArray[0]
+ jgArray = jgArray[1:]
+ if jg != prev {
+ sa.AddRune(start)
+ prev = jg
+ }
+ start++
+ }
+ if prev != 0 {
+ /* add the limit code point if the last value was not 0 (it is now start==limit) */
+ sa.AddRune(limit)
+ }
+ if limit == idxs[ixJgLimit] {
+ /* switch to the second Joining_Group range */
+ start = idxs[ixJgStart2]
+ limit = idxs[ixJgLimit2]
+ jgArray = jg2()
+ } else {
+ break
+ }
+ }
+
+ /* add code points with hardcoded properties, plus the ones following them */
+
+ /* (none right now) */
+}
+
+func HasFlag(props uint16, shift int) bool {
+ return ((props >> shift) & 1) != 0
+}
+
+func mirrorCodePoint(m rune) rune {
+ return m & 0x1fffff
+}
+
+func IsJoinControl(c rune) bool {
+ props := trie().Get16(c)
+ return HasFlag(props, joinControlShift)
+}
+
+func JoinType(c rune) JoiningType {
+ props := trie().Get16(c)
+ return JoiningType((props & jtMask) >> jtShift)
+}
+
+func JoinGroup(c rune) JoiningGroup {
+ idxs := indexes()
+ start := idxs[ixJgStart]
+ limit := idxs[ixJgLimit]
+ if start <= c && c < limit {
+ return JoiningGroup(jg()[c-start])
+ }
+ start = idxs[ixJgStart2]
+ limit = idxs[ixJgLimit2]
+ if start <= c && c < limit {
+ return JoiningGroup(jg2()[c-start])
+ }
+ return JgNoJoiningGroup
+}
+
+func IsMirrored(c rune) bool {
+ props := trie().Get16(c)
+ return HasFlag(props, isMirroredShift)
+}
+
+func IsBidiControl(c rune) bool {
+ props := trie().Get16(c)
+ return HasFlag(props, bidiControlShift)
+}
+
+func PairedBracketType(c rune) UPairedBracketType {
+ props := trie().Get16(c)
+ return UPairedBracketType((props & bptMask) >> bptShift)
+}
+
+func Class(c rune) CharDirection {
+ props := trie().Get16(c)
+ return CharDirection(props & classMask)
+}
diff --git a/go/mysql/icuregex/internal/ucase/fold.go b/go/mysql/icuregex/internal/ucase/fold.go
new file mode 100644
index 00000000000..728142042ba
--- /dev/null
+++ b/go/mysql/icuregex/internal/ucase/fold.go
@@ -0,0 +1,243 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ucase
+
+import (
+ "math/bits"
+
+ "vitess.io/vitess/go/mysql/icuregex/internal/utf16"
+)
+
+func FoldRunes(str []rune) []rune {
+ out := make([]rune, 0, len(str))
+ for _, c := range str {
+ r, exp := FullFolding(c)
+ if exp == nil {
+ out = append(out, r)
+ continue
+ }
+
+ for len(exp) > 0 {
+ r, exp = utf16.NextUnsafe(exp)
+ out = append(out, r)
+ }
+ }
+ return out
+}
+
+/*
+ - Case folding is similar to lowercasing.
+ - The result may be a simple mapping, i.e., a single code point, or
+ - a full mapping, i.e., a string.
+ - If the case folding for a code point is the same as its simple (1:1) lowercase mapping,
+ - then only the lowercase mapping is stored.
+ *
+ - Some special cases are hardcoded because their conditions cannot be
+ - parsed and processed from CaseFolding.txt.
+ *
+ - Unicode 3.2 CaseFolding.txt specifies for its status field:
+
+# C: common case folding, common mappings shared by both simple and full mappings.
+# F: full case folding, mappings that cause strings to grow in length. Multiple characters are separated by spaces.
+# S: simple case folding, mappings to single characters where different from F.
+# T: special case for uppercase I and dotted uppercase I
+# - For non-Turkic languages, this mapping is normally not used.
+# - For Turkic languages (tr, az), this mapping can be used instead of the normal mapping for these characters.
+#
+# Usage:
+# A. To do a simple case folding, use the mappings with status C + S.
+# B. To do a full case folding, use the mappings with status C + F.
+#
+# The mappings with status T can be used or omitted depending on the desired case-folding
+# behavior. (The default option is to exclude them.)
+
+ - Unicode 3.2 has 'T' mappings as follows:
+
+0049; T; 0131; # LATIN CAPITAL LETTER I
+0130; T; 0069; # LATIN CAPITAL LETTER I WITH DOT ABOVE
+
+ - while the default mappings for these code points are:
+
+0049; C; 0069; # LATIN CAPITAL LETTER I
+0130; F; 0069 0307; # LATIN CAPITAL LETTER I WITH DOT ABOVE
+
+ - U+0130 has no simple case folding (simple-case-folds to itself).
+*/
+func Fold(c rune) rune {
+ props := trie().Get16(c)
+ if !hasException(props) {
+ if isUpperOrTitle(props) {
+ c += getDelta(props)
+ }
+ } else {
+ pe := getExceptions(props)
+ excWord := pe[0]
+ pe = pe[1:]
+ if (excWord & excConditionalFold) != 0 {
+ /* special case folding mappings, hardcoded */
+ /* default mappings */
+ if c == 0x49 {
+ /* 0049; C; 0069; # LATIN CAPITAL LETTER I */
+ return 0x69
+ } else if c == 0x130 {
+ /* no simple case folding for U+0130 */
+ return c
+ }
+ }
+ if (excWord & excNoSimpleCaseFolding) != 0 {
+ return c
+ }
+ if hasSlot(excWord, excDelta) && isUpperOrTitle(props) {
+ var delta int32
+ delta, _ = getSlotValue(excWord, excDelta, pe)
+ if excWord&excDeltaIsNegative == 0 {
+ return c + delta
+ }
+ return c - delta
+ }
+
+ var idx int32
+ if hasSlot(excWord, excFold) {
+ idx = excFold
+ } else if hasSlot(excWord, excLower) {
+ idx = excLower
+ } else {
+ return c
+ }
+ c, _ = getSlotValue(excWord, idx, pe)
+ }
+ return c
+}
+
+func FullFolding(c rune) (rune, []uint16) {
+ result := c
+ props := trie().Get16(c)
+
+ if !hasException(props) {
+ if isUpperOrTitle(props) {
+ result = c + getDelta(props)
+ }
+ return result, nil
+ }
+
+ pe := getExceptions(props)
+ excWord := pe[0]
+ pe = pe[1:]
+ var idx int32
+
+ if excWord&excConditionalFold != 0 {
+ /* use hardcoded conditions and mappings */
+ /* default mappings */
+ if c == 0x49 {
+ /* 0049; C; 0069; # LATIN CAPITAL LETTER I */
+ return 0x69, nil
+ } else if c == 0x130 {
+ /* 0130; F; 0069 0307; # LATIN CAPITAL LETTER I WITH DOT ABOVE */
+ return -1, []uint16{0x69, 0x307}
+ }
+ } else if hasSlot(excWord, excFullMappings) {
+ full, pe := getSlotValue(excWord, excFullMappings, pe)
+
+ /* start of full case mapping strings */
+ pe = pe[1:]
+
+ /* skip the lowercase result string */
+ pe = pe[full&fullLower:]
+ full = (full >> 4) & 0xf
+
+ if full != 0 {
+ /* set the output pointer to the result string */
+ return -1, pe[:full]
+ }
+ }
+
+ if excWord&excNoSimpleCaseFolding != 0 {
+ return result, nil
+ }
+ if hasSlot(excWord, excDelta) && isUpperOrTitle(props) {
+ delta, _ := getSlotValue(excWord, excDelta, pe)
+ if excWord&excDeltaIsNegative == 0 {
+ return c + delta, nil
+ }
+ return c - delta, nil
+ }
+ if hasSlot(excWord, excFold) {
+ idx = excFold
+ } else if hasSlot(excWord, excLower) {
+ idx = excLower
+ } else {
+ return c, nil
+ }
+ result, _ = getSlotValue(excWord, idx, pe)
+ return result, nil
+}
+
+const (
+ excLower = iota
+ excFold
+ excUpper
+ excTitle
+ excDelta
+ exc5 /* reserved */
+ excClosure
+ excFullMappings
+)
+
+const (
+ /* complex/conditional mappings */
+ excConditionalSpecial = 0x4000
+ excConditionalFold = 0x8000
+ excNoSimpleCaseFolding = 0x200
+ excDeltaIsNegative = 0x400
+ excSensitive = 0x800
+
+ excDoubleSlots = 0x100
+)
+
+func isUpperOrTitle(props uint16) bool {
+ return props&2 != 0
+}
+
+func getDelta(props uint16) rune {
+ return rune(int16(props) >> 7)
+}
+
+func getExceptions(props uint16) []uint16 {
+ return exceptions()[props>>4:]
+}
+
+func hasSlot(flags uint16, idx int32) bool {
+ return (flags & (1 << idx)) != 0
+}
+
+func slotOffset(flags uint16, idx int32) int {
+ return bits.OnesCount8(uint8(flags & ((1 << idx) - 1)))
+}
+
+func getSlotValue(excWord uint16, idx int32, pExc16 []uint16) (int32, []uint16) {
+ if excWord&excDoubleSlots == 0 {
+ pExc16 = pExc16[slotOffset(excWord, idx):]
+ return int32(pExc16[0]), pExc16
+ }
+ pExc16 = pExc16[2*slotOffset(excWord, idx):]
+ return (int32(pExc16[0]) << 16) | int32(pExc16[1]), pExc16[1:]
+}
diff --git a/go/mysql/icuregex/internal/ucase/loader.go b/go/mysql/icuregex/internal/ucase/loader.go
new file mode 100644
index 00000000000..2ac25cc0f6f
--- /dev/null
+++ b/go/mysql/icuregex/internal/ucase/loader.go
@@ -0,0 +1,101 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ucase
+
+import (
+ "errors"
+ "sync"
+
+ "vitess.io/vitess/go/mysql/icuregex/internal/icudata"
+ "vitess.io/vitess/go/mysql/icuregex/internal/udata"
+ "vitess.io/vitess/go/mysql/icuregex/internal/utrie"
+)
+
+var ucaseOnce sync.Once
+var ucase struct {
+ trie *utrie.UTrie2
+ exceptions []uint16
+}
+
+func trie() *utrie.UTrie2 {
+ loadUCase()
+ return ucase.trie
+}
+
+func exceptions() []uint16 {
+ loadUCase()
+ return ucase.exceptions
+}
+
+func loadUCase() {
+ ucaseOnce.Do(func() {
+ b := udata.NewBytes(icudata.UCase)
+ if err := readData(b); err != nil {
+ panic(err)
+ }
+ })
+}
+
+func readData(bytes *udata.Bytes) error {
+ err := bytes.ReadHeader(func(info *udata.DataInfo) bool {
+ return info.DataFormat[0] == 0x63 &&
+ info.DataFormat[1] == 0x41 &&
+ info.DataFormat[2] == 0x53 &&
+ info.DataFormat[3] == 0x45 &&
+ info.FormatVersion[0] == 4
+ })
+ if err != nil {
+ return err
+ }
+
+ count := int32(bytes.Uint32())
+ if count < ixTop {
+ return errors.New("indexes[0] too small in ucase.icu")
+ }
+
+ indexes := make([]int32, count)
+ indexes[0] = count
+
+ for i := int32(1); i < count; i++ {
+ indexes[i] = int32(bytes.Uint32())
+ }
+
+ ucase.trie, err = utrie.UTrie2FromBytes(bytes)
+ if err != nil {
+ return err
+ }
+
+ expectedTrieLength := indexes[ixTrieSize]
+ trieLength := ucase.trie.SerializedLength()
+
+ if trieLength > expectedTrieLength {
+ return errors.New("ucase.icu: not enough bytes for the trie")
+ }
+
+ bytes.Skip(expectedTrieLength - trieLength)
+
+ if n := indexes[ixExcLength]; n > 0 {
+ ucase.exceptions = bytes.Uint16Slice(n)
+ }
+
+ return nil
+}
diff --git a/go/mysql/icuregex/internal/ucase/ucase.go b/go/mysql/icuregex/internal/ucase/ucase.go
new file mode 100644
index 00000000000..33fac0a5cce
--- /dev/null
+++ b/go/mysql/icuregex/internal/ucase/ucase.go
@@ -0,0 +1,359 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ucase
+
+import (
+ "vitess.io/vitess/go/mysql/icuregex/internal/utf16"
+)
+
+const (
+ ixIndexTop = 0
+ ixLength = 1
+ ixTrieSize = 2
+ ixExcLength = 3
+ ixUnfoldLength = 4
+ ixMaxFullLength = 15
+ ixTop = 16
+)
+
+type propertySet interface {
+ AddRune(ch rune)
+}
+
+func AddPropertyStarts(sa propertySet) {
+ /* add the start code point of each same-value range of the trie */
+ trie().Enum(nil, func(start, _ rune, _ uint32) bool {
+ sa.AddRune(start)
+ return true
+ })
+
+ /* add code points with hardcoded properties, plus the ones following them */
+
+ /* (none right now, see comment below) */
+
+ /*
+ * Omit code points with hardcoded specialcasing properties
+ * because we do not build property UnicodeSets for them right now.
+ */
+}
+
+const (
+ fullMappingsMaxLength = (4 * 0xf)
+ closureMaxLength = 0xf
+
+ fullLower = 0xf
+ fullFolding = 0xf0
+ fullUpper = 0xf00
+ fullTitle = 0xf000
+)
+
+func AddCaseClosure(c rune, sa propertySet) {
+ /*
+ * Hardcode the case closure of i and its relatives and ignore the
+ * data file data for these characters.
+ * The Turkic dotless i and dotted I with their case mapping conditions
+ * and case folding option make the related characters behave specially.
+ * This code matches their closure behavior to their case folding behavior.
+ */
+
+ switch c {
+ case 0x49:
+ /* regular i and I are in one equivalence class */
+ sa.AddRune(0x69)
+ return
+ case 0x69:
+ sa.AddRune(0x49)
+ return
+ case 0x130:
+ /* dotted I is in a class with <0069 0307> (for canonical equivalence with <0049 0307>) */
+ // the Regex engine calls removeAllStrings() on all UnicodeSets, so we don't need to insert them
+ // sa->addString(sa->set, iDot, 2);
+ return
+ case 0x131:
+ /* dotless i is in a class by itself */
+ return
+ default:
+ /* otherwise use the data file data */
+ break
+ }
+
+ props := trie().Get16(c)
+ if !hasException(props) {
+ if getPropsType(props) != None {
+ /* add the one simple case mapping, no matter what type it is */
+ delta := getDelta(props)
+ if delta != 0 {
+ sa.AddRune(c + delta)
+ }
+ }
+ } else {
+ /*
+ * c has exceptions, so there may be multiple simple and/or
+ * full case mappings. Add them all.
+ */
+ pe := getExceptions(props)
+ excWord := pe[0]
+ pe = pe[1:]
+ var idx int32
+ var closure []uint16
+
+ /* add all simple case mappings */
+ for idx = excLower; idx <= excTitle; idx++ {
+ if hasSlot(excWord, idx) {
+ c, _ = getSlotValue(excWord, idx, pe)
+ sa.AddRune(c)
+ }
+ }
+ if hasSlot(excWord, excDelta) {
+ delta, _ := getSlotValue(excWord, excDelta, pe)
+ if excWord&excDeltaIsNegative == 0 {
+ sa.AddRune(c + delta)
+ } else {
+ sa.AddRune(c - delta)
+ }
+ }
+
+ /* get the closure string pointer & length */
+ if hasSlot(excWord, excClosure) {
+ closureLength, pe1 := getSlotValue(excWord, excClosure, pe)
+ closureLength &= closureMaxLength /* higher bits are reserved */
+ closure = pe1[1 : 1+closureLength] /* behind this slot, unless there are full case mappings */
+ }
+
+ /* add the full case folding */
+ if hasSlot(excWord, excFullMappings) {
+ fullLength, pe1 := getSlotValue(excWord, excFullMappings, pe)
+
+ /* start of full case mapping strings */
+ pe1 = pe1[1:]
+
+ fullLength &= 0xffff /* bits 16 and higher are reserved */
+
+ /* skip the lowercase result string */
+ pe1 = pe1[fullLength&fullLower:]
+ fullLength >>= 4
+
+ /* skip adding the case folding strings */
+ length := fullLength & 0xf
+ pe1 = pe1[length:]
+
+ /* skip the uppercase and titlecase strings */
+ fullLength >>= 4
+ pe1 = pe1[fullLength&0xf:]
+ fullLength >>= 4
+ pe1 = pe1[fullLength:]
+
+ closure = pe1[:len(closure)]
+ }
+
+ /* add each code point in the closure string */
+ for len(closure) > 0 {
+ c, closure = utf16.NextUnsafe(closure)
+ sa.AddRune(c)
+ }
+ }
+}
+
+const dotMask = 0x60
+
+const (
+ noDot = 0 /* normal characters with cc=0 */
+ softDotted = 0x20 /* soft-dotted characters with cc=0 */
+ above = 0x40 /* "above" accents with cc=230 */
+ otherAccent = 0x60 /* other accent character (0> excDotShift) & dotMask)
+}
+
+func IsCaseSensitive(c rune) bool {
+ props := trie().Get16(c)
+ if !hasException(props) {
+ return (props & sensitive) != 0
+ }
+ pe := getExceptions(props)
+ return (pe[0] & excSensitive) != 0
+}
+
+func ToFullLower(c rune) rune {
+ // The sign of the result has meaning, input must be non-negative so that it can be returned as is.
+ result := c
+ props := trie().Get16(c)
+ if !hasException(props) {
+ if isUpperOrTitle(props) {
+ result = c + getDelta(props)
+ }
+ } else {
+ pe := getExceptions(props)
+ excWord := pe[0]
+ pe = pe[1:]
+
+ if excWord&excConditionalSpecial != 0 {
+ /* use hardcoded conditions and mappings */
+ if c == 0x130 {
+ return 2
+ }
+ /* no known conditional special case mapping, use a normal mapping */
+ } else if hasSlot(excWord, excFullMappings) {
+ full, _ := getSlotValue(excWord, excFullMappings, pe)
+ full = full & fullLower
+ if full != 0 {
+ /* return the string length */
+ return full
+ }
+ }
+
+ if hasSlot(excWord, excDelta) && isUpperOrTitle(props) {
+ delta, _ := getSlotValue(excWord, excDelta, pe)
+ if (excWord & excDeltaIsNegative) == 0 {
+ return c + delta
+ }
+ return c - delta
+ }
+ if hasSlot(excWord, excLower) {
+ result, _ = getSlotValue(excWord, excLower, pe)
+ }
+ }
+
+ if result == c {
+ return ^result
+ }
+ return result
+}
+
+func ToFullUpper(c rune) rune {
+ return toUpperOrTitle(c, true)
+}
+
+func ToFullTitle(c rune) rune {
+ return toUpperOrTitle(c, false)
+}
+
+func toUpperOrTitle(c rune, upperNotTitle bool) rune {
+ result := c
+ props := trie().Get16(c)
+ if !hasException(props) {
+ if getPropsType(props) == Lower {
+ result = c + getDelta(props)
+ }
+ } else {
+ pe := getExceptions(props)
+ excWord := pe[0]
+ pe = pe[1:]
+
+ if excWord&excConditionalSpecial != 0 {
+ if c == 0x0587 {
+ return 2
+ }
+ /* no known conditional special case mapping, use a normal mapping */
+ } else if hasSlot(excWord, excFullMappings) {
+ full, _ := getSlotValue(excWord, excFullMappings, pe)
+
+ /* skip the lowercase and case-folding result strings */
+ full >>= 8
+
+ if upperNotTitle {
+ full &= 0xf
+ } else {
+ /* skip the uppercase result string */
+ full = (full >> 4) & 0xf
+ }
+
+ if full != 0 {
+ /* return the string length */
+ return full
+ }
+ }
+
+ if hasSlot(excWord, excDelta) && getPropsType(props) == Lower {
+ delta, _ := getSlotValue(excWord, excDelta, pe)
+ if (excWord & excDeltaIsNegative) == 0 {
+ return c + delta
+ }
+ return c - delta
+ }
+ var idx int32
+ if !upperNotTitle && hasSlot(excWord, excTitle) {
+ idx = excTitle
+ } else if hasSlot(excWord, excUpper) {
+ /* here, titlecase is same as uppercase */
+ idx = excUpper
+ } else {
+ return ^c
+ }
+ result, _ = getSlotValue(excWord, idx, pe)
+ }
+
+ if result == c {
+ return ^result
+ }
+ return result
+}
+
+func GetTypeOrIgnorable(c rune) int32 {
+ props := trie().Get16(c)
+ return int32(props & 7)
+}
+
+type Type int32
+
+const (
+ None Type = iota
+ Lower
+ Upper
+ Title
+)
+
+const typeMask = 3
+
+func GetType(c rune) Type {
+ props := trie().Get16(c)
+ return getPropsType(props)
+}
+
+func getPropsType(props uint16) Type {
+ return Type(props & typeMask)
+}
diff --git a/go/mysql/icuregex/internal/uchar/constants.go b/go/mysql/icuregex/internal/uchar/constants.go
new file mode 100644
index 00000000000..60899393397
--- /dev/null
+++ b/go/mysql/icuregex/internal/uchar/constants.go
@@ -0,0 +1,238 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uchar
+
+func Mask[T ~int | ~int8](x T) uint32 {
+ return 1 << x
+}
+
+type Category int8
+
+const (
+ /*
+ * Note: UCharCategory constants and their API comments are parsed by preparseucd.py.
+ * It matches pairs of lines like
+ * / ** comment... * /
+ * U_<[A-Z_]+> = ,
+ */
+
+ /** Non-category for unassigned and non-character code points. @stable ICU 2.0 */
+ Unassigned Category = 0
+ /** Cn "Other, Not Assigned (no characters in [UnicodeData.txt] have this property)" (same as U_UNASSIGNED!) @stable ICU 2.0 */
+ GeneralOtherTypes Category = iota - 1
+ /** Lu @stable ICU 2.0 */
+ UppercaseLetter
+ /** Ll @stable ICU 2.0 */
+ LowercaseLetter
+ /** Lt @stable ICU 2.0 */
+ TitlecaseLetter
+ /** Lm @stable ICU 2.0 */
+ ModifierLetter
+ /** Lo @stable ICU 2.0 */
+ OtherLetter
+ /** Mn @stable ICU 2.0 */
+ NonSpacingMask
+ /** Me @stable ICU 2.0 */
+ EnclosingMark
+ /** Mc @stable ICU 2.0 */
+ CombiningSpacingMask
+ /** Nd @stable ICU 2.0 */
+ DecimalDigitNumber
+ /** Nl @stable ICU 2.0 */
+ LetterNumber
+ /** No @stable ICU 2.0 */
+ OtherNumber
+ /** Zs @stable ICU 2.0 */
+ SpaceSeparator
+ /** Zl @stable ICU 2.0 */
+ LineSeparator
+ /** Zp @stable ICU 2.0 */
+ ParagraphSeparator
+ /** Cc @stable ICU 2.0 */
+ ControlChar
+ /** Cf @stable ICU 2.0 */
+ FormatChar
+ /** Co @stable ICU 2.0 */
+ PrivateUseChar
+ /** Cs @stable ICU 2.0 */
+ Surrogate
+ /** Pd @stable ICU 2.0 */
+ DashPunctuation
+ /** Ps @stable ICU 2.0 */
+ StartPunctuation
+ /** Pe @stable ICU 2.0 */
+ EndPunctuation
+ /** Pc @stable ICU 2.0 */
+ ConnectorPunctuation
+ /** Po @stable ICU 2.0 */
+ OtherPunctuation
+ /** Sm @stable ICU 2.0 */
+ MathSymbol
+ /** Sc @stable ICU 2.0 */
+ CurrencySymbol
+ /** Sk @stable ICU 2.0 */
+ ModifierSymbol
+ /** So @stable ICU 2.0 */
+ OtherSymbol
+ /** Pi @stable ICU 2.0 */
+ InitialPunctuation
+ /** Pf @stable ICU 2.0 */
+ FinalPunctuation
+ /**
+ * One higher than the last enum UCharCategory constant.
+ * This numeric value is stable (will not change), see
+ * http://www.unicode.org/policies/stability_policy.html#Property_Value
+ *
+ * @stable ICU 2.0
+ */
+ CharCategoryCount
+)
+
+var (
+ GcCnMask = Mask(GeneralOtherTypes)
+
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcLuMask = Mask(UppercaseLetter)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcLlMask = Mask(LowercaseLetter)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcLtMask = Mask(TitlecaseLetter)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcLmMask = Mask(ModifierLetter)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcLoMask = Mask(OtherLetter)
+
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcMnMask = Mask(NonSpacingMask)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcMeMask = Mask(EnclosingMark)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcMcMask = Mask(CombiningSpacingMask)
+
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcNdMask = Mask(DecimalDigitNumber)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcNlMask = Mask(LetterNumber)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcNoMask = Mask(OtherNumber)
+
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcZsMask = Mask(SpaceSeparator)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcZlMask = Mask(LineSeparator)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcZpMask = Mask(ParagraphSeparator)
+
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcCcMask = Mask(ControlChar)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcCfMask = Mask(FormatChar)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcCoMask = Mask(PrivateUseChar)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcCsMask = Mask(Surrogate)
+
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcPdMask = Mask(DashPunctuation)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcPsMask = Mask(StartPunctuation)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcPeMask = Mask(EndPunctuation)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcPcMask = Mask(ConnectorPunctuation)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcPoMask = Mask(OtherPunctuation)
+
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcSmMask = Mask(MathSymbol)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcScMask = Mask(CurrencySymbol)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcSkMask = Mask(ModifierSymbol)
+ /** Mask constant for a UCharCategory. @stable ICU 2.1 */
+ GcSoMask = Mask(OtherSymbol)
+
+ /** Mask constant for multiple UCharCategory bits (L Letters). @stable ICU 2.1 */
+ GcLMask = (GcLuMask | GcLlMask | GcLtMask | GcLmMask | GcLoMask)
+
+ /** Mask constant for multiple UCharCategory bits (LC Cased Letters). @stable ICU 2.1 */
+ GcLcMask = (GcLuMask | GcLlMask | GcLtMask)
+
+ /** Mask constant for multiple UCharCategory bits (M Marks). @stable ICU 2.1 */
+ GcMMask = (GcMnMask | GcMeMask | GcMcMask)
+
+ /** Mask constant for multiple UCharCategory bits (N Numbers). @stable ICU 2.1 */
+ GcNMask = (GcNdMask | GcNlMask | GcNoMask)
+
+ /** Mask constant for multiple UCharCategory bits (Z Separators). @stable ICU 2.1 */
+ GcZMask = (GcZsMask | GcZlMask | GcZpMask)
+)
+
+const upropsAgeShift = 24
+const maxVersionLength = 4
+const versionDelimiter = '.'
+
+type UVersionInfo [maxVersionLength]uint8
+
+const (
+ /** No numeric value. */
+ UPropsNtvNone = 0
+ /** Decimal digits: nv=0..9 */
+ UPropsNtvDecimalStart = 1
+ /** Other digits: nv=0..9 */
+ UPropsNtvDigitStart = 11
+ /** Small integers: nv=0..154 */
+ UPropsNtvNumericStart = 21
+ /** Fractions: ((ntv>>4)-12) / ((ntv&0xf)+1) = -1..17 / 1..16 */
+ UPropsNtvFractionStart = 0xb0
+ /**
+ * Large integers:
+ * ((ntv>>5)-14) * 10^((ntv&0x1f)+2) = (1..9)*(10^2..10^33)
+ * (only one significant decimal digit)
+ */
+ UPropsNtvLargeStart = 0x1e0
+ /**
+ * Sexagesimal numbers:
+ * ((ntv>>2)-0xbf) * 60^((ntv&3)+1) = (1..9)*(60^1..60^4)
+ */
+ UPropsNtvBase60Start = 0x300
+ /**
+ * Fraction-20 values:
+ * frac20 = ntv-0x324 = 0..0x17 -> 1|3|5|7 / 20|40|80|160|320|640
+ * numerator: num = 2*(frac20&3)+1
+ * denominator: den = 20<<(frac20>>2)
+ */
+ UPropsNtvFraction20Start = UPropsNtvBase60Start + 36 // 0x300+9*4=0x324
+ /**
+ * Fraction-32 values:
+ * frac32 = ntv-0x34c = 0..15 -> 1|3|5|7 / 32|64|128|256
+ * numerator: num = 2*(frac32&3)+1
+ * denominator: den = 32<<(frac32>>2)
+ */
+ UPropsNtvFraction32Start = UPropsNtvFraction20Start + 24 // 0x324+6*4=0x34c
+ /** No numeric value (yet). */
+ UPropsNtvReservedStart = UPropsNtvFraction32Start + 16 // 0x34c+4*4=0x35c
+
+ UPropsNtvMaxSmallInt = UPropsNtvFractionStart - UPropsNtvNumericStart - 1
+)
+
+const noNumericValue = -123456789.0
diff --git a/go/mysql/icuregex/internal/uchar/loader.go b/go/mysql/icuregex/internal/uchar/loader.go
new file mode 100644
index 00000000000..fab54f85e0a
--- /dev/null
+++ b/go/mysql/icuregex/internal/uchar/loader.go
@@ -0,0 +1,139 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uchar
+
+import (
+ "errors"
+ "sync"
+
+ "vitess.io/vitess/go/mysql/icuregex/internal/icudata"
+ "vitess.io/vitess/go/mysql/icuregex/internal/udata"
+ "vitess.io/vitess/go/mysql/icuregex/internal/utrie"
+)
+
+// uprops caches the parsed contents of the embedded uprops.icu data file:
+// the main properties trie, the additional-properties trie with its vectors
+// table, and the script-extensions table. Loaded once, on first access.
+var upropsOnce sync.Once
+var uprops struct {
+ trie *utrie.UTrie2
+ trie2 *utrie.UTrie2
+ vectorsColumns int32
+ vectors []uint32
+ scriptExtensions []uint16
+}
+
+// trie returns the main properties trie, loading the data on first use.
+func trie() *utrie.UTrie2 {
+ loadUProps()
+ return uprops.trie
+}
+
+// trie2 returns the additional-properties trie, loading the data on first use.
+func trie2() *utrie.UTrie2 {
+ loadUProps()
+ return uprops.trie2
+}
+
+// vectorsColumns returns the number of columns per properties vector.
+func vectorsColumns() int32 {
+ loadUProps()
+ return uprops.vectorsColumns
+}
+
+// vectors returns the additional-properties vectors table.
+func vectors() []uint32 {
+ loadUProps()
+ return uprops.vectors
+}
+
+// scriptExtensions returns the script-extensions table.
+func scriptExtensions() []uint16 {
+ loadUProps()
+ return uprops.scriptExtensions
+}
+
+// loadUProps parses the embedded icudata.UProps blob exactly once. The data
+// is compiled into the binary, so a parse failure is a programming error
+// and panics rather than returning an error.
+func loadUProps() {
+ upropsOnce.Do(func() {
+ b := udata.NewBytes(icudata.UProps)
+ if err := readData(b); err != nil {
+ panic(err)
+ }
+ })
+}
+
+// readData parses the uprops.icu payload: it validates the header
+// (dataFormat "UPro", format version 7), reads the leading int32 indexes,
+// then deserializes the main trie, the additional-properties trie plus its
+// vectors, and the script-extensions table.
+func readData(bytes *udata.Bytes) error {
+ err := bytes.ReadHeader(func(info *udata.DataInfo) bool {
+ return info.DataFormat[0] == 0x55 &&
+ info.DataFormat[1] == 0x50 &&
+ info.DataFormat[2] == 0x72 &&
+ info.DataFormat[3] == 0x6f &&
+ info.FormatVersion[0] == 7
+ })
+ if err != nil {
+ return err
+ }
+
+ // The data starts with 16 int32 indexes; offsets are in 32-bit units.
+ propertyOffset := bytes.Int32()
+ /* exceptionOffset = */ bytes.Int32()
+ /* caseOffset = */ bytes.Int32()
+ additionalOffset := bytes.Int32()
+ additionalVectorsOffset := bytes.Int32()
+ uprops.vectorsColumns = bytes.Int32()
+ scriptExtensionsOffset := bytes.Int32()
+ reservedOffset7 := bytes.Int32()
+ /* reservedOffset8 = */ bytes.Int32()
+ /* dataTopOffset = */ bytes.Int32()
+ _ = bytes.Int32()
+ _ = bytes.Int32()
+ // 12 of the 16 indexes were consumed above; skip the remaining 4.
+ bytes.Skip((16 - 12) << 2)
+
+ uprops.trie, err = utrie.UTrie2FromBytes(bytes)
+ if err != nil {
+ return err
+ }
+
+ expectedTrieLength := (propertyOffset - 16) * 4
+ trieLength := uprops.trie.SerializedLength()
+
+ if trieLength > expectedTrieLength {
+ // NOTE(review): the message says "ucase.icu" but this loader reads
+ // uprops.icu — looks copy-pasted; confirm against upstream.
+ return errors.New("ucase.icu: not enough bytes for the trie")
+ }
+
+ // Skip padding after the trie, then the property/exception/case data
+ // between propertyOffset and additionalOffset.
+ bytes.Skip(expectedTrieLength - trieLength)
+ bytes.Skip((additionalOffset - propertyOffset) * 4)
+
+ if uprops.vectorsColumns > 0 {
+ uprops.trie2, err = utrie.UTrie2FromBytes(bytes)
+ if err != nil {
+ return err
+ }
+
+ expectedTrieLength = (additionalVectorsOffset - additionalOffset) * 4
+ trieLength = uprops.trie2.SerializedLength()
+
+ if trieLength > expectedTrieLength {
+ return errors.New("ucase.icu: not enough bytes for the trie")
+ }
+
+ bytes.Skip(expectedTrieLength - trieLength)
+ uprops.vectors = bytes.Uint32Slice(scriptExtensionsOffset - additionalVectorsOffset)
+ }
+
+ if n := (reservedOffset7 - scriptExtensionsOffset) * 2; n > 0 {
+ uprops.scriptExtensions = bytes.Uint16Slice(n)
+ }
+
+ return nil
+}
diff --git a/go/mysql/icuregex/internal/uchar/uchar.go b/go/mysql/icuregex/internal/uchar/uchar.go
new file mode 100644
index 00000000000..e93b51d9bb4
--- /dev/null
+++ b/go/mysql/icuregex/internal/uchar/uchar.go
@@ -0,0 +1,316 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uchar
+
+import (
+ "strconv"
+)
+
+// PropertySet receives code points that begin a new property-value range;
+// implementations collect them into a unicode set.
+type PropertySet interface {
+ AddRune(ch rune)
+}
+
+// VecAddPropertyStarts adds to sa the start code point of every same-value
+// range of the additional-properties (vectors) trie.
+func VecAddPropertyStarts(sa PropertySet) {
+ trie2().Enum(nil, func(start, _ rune, _ uint32) bool {
+ sa.AddRune(start)
+ return true
+ })
+}
+
+// Code points whose properties are hardcoded in the ICU C functions that
+// this package mirrors (see AddPropertyStarts).
+const (
+ tab = 0x0009
+ lf = 0x000a
+ ff = 0x000c
+ cr = 0x000d
+ nbsp = 0x00a0
+ cgj = 0x034f
+ figuresp = 0x2007
+ hairsp = 0x200a
+ zwnj = 0x200c
+ zwj = 0x200d
+ rlm = 0x200f
+ nnbsp = 0x202f
+ // ZERO WIDTH NO-BREAK SPACE (BOM) is U+FEFF; the previous value 0xfef
+ // (a Tibetan sign) was a typo and corrupted the default-ignorable
+ // ranges added in AddPropertyStarts.
+ zwnbsp = 0xfeff
+)
+
+// AddPropertyStarts adds to sa the start code point of each same-value
+// range of the main properties trie, plus the code points (and the ones
+// following their ranges) whose properties the mirrored ICU C functions
+// hardcode.
+func AddPropertyStarts(sa PropertySet) {
+ /* add the start code point of each same-value range of the main trie */
+ trie().Enum(nil, func(start, _ rune, _ uint32) bool {
+ sa.AddRune(start)
+ return true
+ })
+
+ /* add code points with hardcoded properties, plus the ones following them */
+
+ /* add for u_isblank() */
+ sa.AddRune(tab)
+ sa.AddRune(tab + 1)
+
+ /* add for IS_THAT_CONTROL_SPACE() */
+ sa.AddRune(cr + 1) /* range TAB..CR */
+ sa.AddRune(0x1c)
+ sa.AddRune(0x1f + 1)
+ sa.AddRune(0x85) // NEXT LINE (NEL)
+ sa.AddRune(0x85 + 1)
+
+ /* add for u_isIDIgnorable() what was not added above */
+ sa.AddRune(0x7f) /* range DEL..NBSP-1, NBSP added below */
+ sa.AddRune(hairsp)
+ sa.AddRune(rlm + 1)
+ sa.AddRune(0x206a) // INHIBIT SYMMETRIC SWAPPING
+ sa.AddRune(0x206f + 1) // NOMINAL DIGIT SHAPES
+ sa.AddRune(zwnbsp)
+ sa.AddRune(zwnbsp + 1)
+
+ /* add no-break spaces for u_isWhitespace() what was not added above */
+ sa.AddRune(nbsp)
+ sa.AddRune(nbsp + 1)
+ sa.AddRune(figuresp)
+ sa.AddRune(figuresp + 1)
+ sa.AddRune(nnbsp)
+ sa.AddRune(nnbsp + 1)
+
+ /* add for u_digit() */
+ sa.AddRune('a')
+ sa.AddRune('z' + 1)
+ sa.AddRune('A')
+ sa.AddRune('Z' + 1)
+ // fullwidth: these are the FULLWIDTH letters U+FF41..FF5A and
+ // U+FF21..FF3A, not a repeat of the ASCII letters (cf. the
+ // 0xff21..0xff46 fullwidth checks in IsXDigit).
+ sa.AddRune('ａ')
+ sa.AddRune('ｚ' + 1)
+ sa.AddRune('Ａ')
+ sa.AddRune('Ｚ' + 1)
+
+ /* add for u_isxdigit() */
+ sa.AddRune('f' + 1)
+ sa.AddRune('F' + 1)
+ // fullwidth U+FF46/U+FF26
+ sa.AddRune('ｆ' + 1)
+ sa.AddRune('Ｆ' + 1)
+
+ /* add for UCHAR_DEFAULT_IGNORABLE_CODE_POINT what was not added above */
+ sa.AddRune(0x2060) /* range 2060..206f */
+ sa.AddRune(0xfff0)
+ sa.AddRune(0xfffb + 1)
+ sa.AddRune(0xe0000)
+ sa.AddRune(0xe0fff + 1)
+
+ /* add for UCHAR_GRAPHEME_BASE and others */
+ sa.AddRune(cgj)
+ sa.AddRune(cgj + 1)
+}
+
+// CharType returns the general category of c from the main properties trie.
+func CharType(c rune) Category {
+ props := trie().Get16(c)
+ return getCategory(props)
+}
+
+// getCategory extracts the general category from a packed properties word
+// (its low 5 bits).
+func getCategory(props uint16) Category {
+ return Category(props & 0x1f)
+}
+
+// GetUnicodeProperties returns the additional-properties vector word for c
+// in the given column, or 0 if the column is out of range.
+func GetUnicodeProperties(c rune, column int) uint32 {
+ if column >= int(vectorsColumns()) {
+ return 0
+ }
+ vecIndex := trie2().Get16(c)
+ return vectors()[int(vecIndex)+column]
+}
+
+// ScriptExtension returns the script-extensions table entry at idx.
+func ScriptExtension(idx uint32) uint16 {
+ return scriptExtensions()[idx]
+}
+
+// ScriptExtensions returns the script-extensions table starting at idx.
+func ScriptExtensions(idx uint32) []uint16 {
+ return scriptExtensions()[idx:]
+}
+
+// IsDigit reports whether c has general category Nd (decimal digit).
+func IsDigit(c rune) bool {
+ return CharType(c) == DecimalDigitNumber
+}
+
+// IsPOSIXPrint mirrors POSIX isprint: a space separator or a graph char.
+func IsPOSIXPrint(c rune) bool {
+ return CharType(c) == SpaceSeparator || IsGraphPOSIX(c)
+}
+
+// IsGraphPOSIX mirrors POSIX isgraph: true unless c is a control,
+// surrogate, unassigned, or separator character.
+func IsGraphPOSIX(c rune) bool {
+ props := trie().Get16(c)
+ /* \p{space}\p{gc=Control} == \p{gc=Z}\p{Control} */
+ /* comparing ==0 returns FALSE for the categories mentioned */
+ return Mask(getCategory(props))&(GcCcMask|GcCsMask|GcCnMask|GcZMask) == 0
+}
+
+// IsXDigit reports whether c is a hexadecimal digit: ASCII or fullwidth
+// a-f/A-F, or any decimal digit.
+func IsXDigit(c rune) bool {
+ /* check ASCII and Fullwidth ASCII a-fA-F */
+ if (c <= 0x66 && c >= 0x41 && (c <= 0x46 || c >= 0x61)) ||
+ (c >= 0xff21 && c <= 0xff46 && (c <= 0xff26 || c >= 0xff41)) {
+ return true
+ }
+ return IsDigit(c)
+}
+
+// IsBlank reports whether c is a horizontal space: TAB, SPACE, or any Zs
+// space separator.
+func IsBlank(c rune) bool {
+ if c <= 0x9f {
+ return c == 9 || c == 0x20 /* TAB or SPACE */
+ }
+ /* Zs */
+ return CharType(c) == SpaceSeparator
+}
+
+// CharAge returns the Unicode version in which c was assigned, decoded from
+// the two version nibbles packed in the top byte of vector column 0.
+func CharAge(c rune) UVersionInfo {
+ version := GetUnicodeProperties(c, 0) >> upropsAgeShift
+ return UVersionInfo{uint8(version >> 4), uint8(version & 0xf), 0, 0}
+}
+
+// VersionFromString parses up to four dot-separated numeric components from
+// str into a UVersionInfo; missing components remain zero.
+func VersionFromString(str string) (version UVersionInfo) {
+ part := 0
+ for len(str) > 0 && part < maxVersionLength {
+ if str[0] == versionDelimiter {
+ str = str[1:]
+ }
+ str, version[part] = parseInt(str)
+ part++
+ }
+ return
+}
+
+// parseInt is simplified but aims to mimic strtoul usage
+// as it is used for ICU version parsing: skip leading whitespace, parse a
+// run of decimal digits, and return the remaining string plus the parsed
+// value (0 when there are no digits or the value overflows uint8).
+func parseInt(str string) (string, uint8) {
+ if str == "" {
+ return str, 0
+ }
+
+ start := 0
+ end := 0
+whitespace:
+ for i := 0; i < len(str); i++ {
+ switch str[i] {
+ case ' ', '\f', '\n', '\r', '\t', '\v':
+ start++
+ continue
+ default:
+ break whitespace
+ }
+ }
+ str = str[start:]
+
+ for i := 0; i < len(str); i++ {
+ if str[i] < '0' || str[i] > '9' {
+ end = i
+ break
+ }
+ end++
+ }
+
+ // str was already re-sliced past the leading whitespace, so the digit
+ // run is str[:end]. The previous str[start:end] applied the whitespace
+ // offset twice, mis-parsing (or slicing out of range on) any input
+ // with leading spaces.
+ val, err := strconv.ParseUint(str[:end], 10, 8)
+ if err != nil {
+ return str[end:], 0
+ }
+ return str[end:], uint8(val)
+}
+
+// The packed numeric-type/value field sits above the low category bits of
+// the main properties word.
+const upropsNumericTypeValueShift = 6
+
+// NumericTypeValue returns the packed numeric-type/value (ntv) field for c.
+func NumericTypeValue(c rune) uint16 {
+ props := trie().Get16(c)
+ return props >> upropsNumericTypeValueShift
+}
+
+// NumericValue decodes the ntv field of c into a float64 — digits, small
+// and large integers, fractions, base-60 numbers, fraction-20 and
+// fraction-32 values — or noNumericValue when c has no numeric value.
+func NumericValue(c rune) float64 {
+ ntv := int32(NumericTypeValue(c))
+
+ if ntv == UPropsNtvNone {
+ return noNumericValue
+ } else if ntv < UPropsNtvDigitStart {
+ /* decimal digit */
+ return float64(ntv - UPropsNtvDecimalStart)
+ } else if ntv < UPropsNtvNumericStart {
+ /* other digit */
+ return float64(ntv - UPropsNtvDigitStart)
+ } else if ntv < UPropsNtvFractionStart {
+ /* small integer */
+ return float64(ntv - UPropsNtvNumericStart)
+ } else if ntv < UPropsNtvLargeStart {
+ /* fraction */
+ numerator := (ntv >> 4) - 12
+ denominator := (ntv & 0xf) + 1
+ return float64(numerator) / float64(denominator)
+ } else if ntv < UPropsNtvBase60Start {
+ /* large, single-significant-digit integer */
+ mant := (ntv >> 5) - 14
+ exp := (ntv & 0x1f) + 2
+ numValue := float64(mant)
+
+ /* multiply by 10^exp without math.h */
+ for exp >= 4 {
+ numValue *= 10000.
+ exp -= 4
+ }
+ switch exp {
+ case 3:
+ numValue *= 1000.0
+ case 2:
+ numValue *= 100.0
+ case 1:
+ numValue *= 10.0
+ case 0:
+ default:
+ }
+
+ return numValue
+ } else if ntv < UPropsNtvFraction20Start {
+ /* sexagesimal (base 60) integer */
+ numValue := (ntv >> 2) - 0xbf
+ exp := (ntv & 3) + 1
+
+ switch exp {
+ case 4:
+ numValue *= 60 * 60 * 60 * 60
+ case 3:
+ numValue *= 60 * 60 * 60
+ case 2:
+ numValue *= 60 * 60
+ case 1:
+ numValue *= 60
+ case 0:
+ default:
+ }
+
+ return float64(numValue)
+ } else if ntv < UPropsNtvFraction32Start {
+ // fraction-20 e.g. 3/80
+ frac20 := ntv - UPropsNtvFraction20Start // 0..0x17
+ numerator := 2*(frac20&3) + 1
+ denominator := 20 << (frac20 >> 2)
+ return float64(numerator) / float64(denominator)
+ } else if ntv < UPropsNtvReservedStart {
+ // fraction-32 e.g. 3/64
+ frac32 := ntv - UPropsNtvFraction32Start // 0..15
+ numerator := 2*(frac32&3) + 1
+ denominator := 32 << (frac32 >> 2)
+ return float64(numerator) / float64(denominator)
+ } else {
+ /* reserved */
+ return noNumericValue
+ }
+}
diff --git a/go/mysql/icuregex/internal/udata/udata.go b/go/mysql/icuregex/internal/udata/udata.go
new file mode 100644
index 00000000000..f20f8be1efa
--- /dev/null
+++ b/go/mysql/icuregex/internal/udata/udata.go
@@ -0,0 +1,155 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package udata
+
+import (
+ "encoding/binary"
+ "errors"
+ "unsafe"
+)
+
+// DataInfo mirrors ICU's UDataInfo record describing an embedded data file.
+type DataInfo struct {
+ /** sizeof(UDataInfo)
+ * @stable ICU 2.0 */
+ Size uint16
+
+ /** unused, set to 0
+ * @stable ICU 2.0*/
+ ReservedWord uint16
+
+ /* platform data properties */
+ /** 0 for little-endian machine, 1 for big-endian
+ * @stable ICU 2.0 */
+ IsBigEndian uint8
+
+ /** see U_CHARSET_FAMILY values in utypes.h
+ * @stable ICU 2.0*/
+ CharsetFamily uint8
+
+ /** sizeof(UChar), one of { 1, 2, 4 }
+ * @stable ICU 2.0*/
+ SizeofUChar uint8
+
+ /** unused, set to 0
+ * @stable ICU 2.0*/
+ ReservedByte uint8
+
+ /** data format identifier
+ * @stable ICU 2.0*/
+ DataFormat [4]uint8
+
+ /** versions: [0] major [1] minor [2] milli [3] micro
+ * @stable ICU 2.0*/
+ FormatVersion [4]uint8
+
+ /** versions: [0] major [1] minor [2] milli [3] micro
+ * @stable ICU 2.0*/
+ DataVersion [4]uint8
+}
+
+// Bytes is a little-endian read cursor over an embedded ICU data blob;
+// orig keeps the full slice so Position can report bytes consumed.
+type Bytes struct {
+ buf []byte
+ orig []byte
+ enc binary.ByteOrder
+}
+
+// NewBytes wraps b in a little-endian cursor positioned at the start.
+func NewBytes(b []byte) *Bytes {
+ return &Bytes{buf: b, orig: b, enc: binary.LittleEndian}
+}
+
+// ReadHeader validates the standard ICU data-file header (magic bytes,
+// byte order, caller-supplied DataInfo predicate) and advances the cursor
+// past the header.
+//
+// NOTE(review): the header is reinterpreted in place via unsafe; this
+// assumes the Go struct layout matches the serialized layout and that the
+// host is little-endian (big-endian *sources* are rejected below, the host
+// is not checked) — confirm for non-little-endian targets.
+func (b *Bytes) ReadHeader(isValid func(info *DataInfo) bool) error {
+ type MappedData struct {
+ headerSize uint16
+ magic1 uint8
+ magic2 uint8
+ }
+
+ type DataHeader struct {
+ dataHeader MappedData
+ info DataInfo
+ }
+
+ data := unsafe.SliceData(b.buf)
+ header := (*DataHeader)(unsafe.Pointer(data))
+
+ if header.dataHeader.magic1 != 0xda || header.dataHeader.magic2 != 0x27 {
+ return errors.New("invalid magic number")
+ }
+
+ if header.info.IsBigEndian != 0 {
+ return errors.New("unsupported: BigEndian data source")
+ }
+
+ if !isValid(&header.info) {
+ return errors.New("failed to validate data header")
+ }
+
+ b.buf = b.buf[header.dataHeader.headerSize:]
+ return nil
+}
+
+// Uint8 reads one byte and advances the cursor.
+func (b *Bytes) Uint8() uint8 {
+ u := b.buf[0]
+ b.buf = b.buf[1:]
+ return u
+}
+
+// Uint16 reads a little-endian uint16 and advances the cursor.
+func (b *Bytes) Uint16() uint16 {
+ u := b.enc.Uint16(b.buf)
+ b.buf = b.buf[2:]
+ return u
+}
+
+// Uint16Slice reinterprets the next 2*size bytes as a []uint16 without
+// copying and advances the cursor.
+// NOTE(review): assumes a little-endian host and suitable alignment of the
+// embedded data — confirm on all supported targets.
+func (b *Bytes) Uint16Slice(size int32) []uint16 {
+ s := unsafe.Slice((*uint16)(unsafe.Pointer(unsafe.SliceData(b.buf))), size)
+ b.buf = b.buf[2*size:]
+ return s
+}
+
+// Uint32Slice reinterprets the next 4*size bytes as a []uint32 without
+// copying and advances the cursor (same host/alignment caveats as
+// Uint16Slice).
+func (b *Bytes) Uint32Slice(size int32) []uint32 {
+ s := unsafe.Slice((*uint32)(unsafe.Pointer(unsafe.SliceData(b.buf))), size)
+ b.buf = b.buf[4*size:]
+ return s
+}
+
+// Uint32 reads a little-endian uint32 and advances the cursor.
+func (b *Bytes) Uint32() uint32 {
+ u := b.enc.Uint32(b.buf)
+ b.buf = b.buf[4:]
+ return u
+}
+
+// Int32 reads a little-endian int32 and advances the cursor.
+func (b *Bytes) Int32() int32 {
+ return int32(b.Uint32())
+}
+
+// Skip advances the cursor by size bytes.
+func (b *Bytes) Skip(size int32) {
+ b.buf = b.buf[size:]
+}
+
+// Uint8Slice returns the next n bytes without copying and advances the
+// cursor.
+func (b *Bytes) Uint8Slice(n int32) []uint8 {
+ s := b.buf[:n]
+ b.buf = b.buf[n:]
+ return s
+}
+
+// Position reports how many bytes have been consumed since NewBytes.
+func (b *Bytes) Position() int32 {
+ return int32(len(b.orig) - len(b.buf))
+}
diff --git a/go/mysql/icuregex/internal/uemoji/loader.go b/go/mysql/icuregex/internal/uemoji/loader.go
new file mode 100644
index 00000000000..7015491d069
--- /dev/null
+++ b/go/mysql/icuregex/internal/uemoji/loader.go
@@ -0,0 +1,69 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uemoji
+
+import (
+ "sync"
+
+ "vitess.io/vitess/go/mysql/icuregex/internal/icudata"
+ "vitess.io/vitess/go/mysql/icuregex/internal/udata"
+ "vitess.io/vitess/go/mysql/icuregex/internal/utrie"
+)
+
+// uemoji caches the parsed contents of the embedded uemoji.icu data file.
+var uemojiOnce sync.Once
+var uemoji struct {
+ trie *utrie.UcpTrie
+}
+
+// loadUEmoji parses icudata.UEmoji exactly once; the data is compiled in,
+// so a parse failure is a programming error and panics.
+func loadUEmoji() {
+ uemojiOnce.Do(func() {
+ b := udata.NewBytes(icudata.UEmoji)
+ if err := readData(b); err != nil {
+ panic(err)
+ }
+ })
+}
+
+// trie returns the emoji-properties trie, loading the data on first use.
+func trie() *utrie.UcpTrie {
+ loadUEmoji()
+ return uemoji.trie
+}
+
+// readData validates the header (dataFormat "Emoj", format version 1) and
+// deserializes the properties trie.
+func readData(bytes *udata.Bytes) error {
+ err := bytes.ReadHeader(func(info *udata.DataInfo) bool {
+ return info.DataFormat[0] == 0x45 &&
+ info.DataFormat[1] == 0x6d &&
+ info.DataFormat[2] == 0x6f &&
+ info.DataFormat[3] == 0x6a &&
+ info.FormatVersion[0] == 1
+ })
+ if err != nil {
+ return err
+ }
+
+ // The first int32 is the byte offset of the trie; presumably it counts
+ // itself, hence the -4 for the word just read. TODO confirm against
+ // the uemoji.icu format description.
+ bytes.Skip(bytes.Int32() - 4)
+ uemoji.trie, err = utrie.UcpTrieFromBytes(bytes)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/go/mysql/icuregex/internal/uemoji/uemoji.go b/go/mysql/icuregex/internal/uemoji/uemoji.go
new file mode 100644
index 00000000000..5cc89acd69a
--- /dev/null
+++ b/go/mysql/icuregex/internal/uemoji/uemoji.go
@@ -0,0 +1,82 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uemoji
+
+import (
+ "vitess.io/vitess/go/mysql/icuregex/internal/utrie"
+)
+
+// propertySet receives code points (or ranges) that begin new property
+// values.
+type propertySet interface {
+ AddRune(ch rune)
+ AddRuneRange(from rune, to rune)
+}
+
+// AddPropertyStarts adds to sa the start code point of each same-value
+// range of the emoji-properties trie.
+func AddPropertyStarts(sa propertySet) {
+ // Add the start code point of each same-value range of the trie.
+ var start, end rune
+ for {
+ end, _ = trie().GetRange(start, utrie.UcpMapRangeNormal, 0, nil)
+ if end < 0 {
+ break
+ }
+ sa.AddRune(start)
+ start = end + 1
+ }
+}
+
+// Bit positions of the emoji binary properties within a trie value.
+const (
+ bitEmoji = 0
+ bitEmojiPresentation = 1
+ bitEmojiModifier = 2
+ bitEmojiModifierBase = 3
+ bitEmojiComponent = 4
+ bitExtendedPictographic = 5
+ bitBasicEmoji = 6
+)
+
+// Note: REGIONAL_INDICATOR is a single, hardcoded range implemented elsewhere.
+// bitFlags maps the property index passed to HasBinaryProperty to a trie
+// bit position; -1 marks properties not backed by this data.
+var bitFlags = []int8{
+ bitEmoji,
+ bitEmojiPresentation,
+ bitEmojiModifier,
+ bitEmojiModifierBase,
+ bitEmojiComponent,
+ -1,
+ -1,
+ bitExtendedPictographic,
+ bitBasicEmoji,
+ -1,
+ -1,
+ -1,
+ -1,
+ -1,
+ bitBasicEmoji,
+}
+
+// HasBinaryProperty reports whether code point c has the emoji binary
+// property at index which (an index into bitFlags).
+func HasBinaryProperty(c rune, which int) bool {
+ bit := bitFlags[which]
+ if bit < 0 {
+ return false // not a property that we support in this function
+ }
+ bits := trie().Get(c)
+ return ((bits >> bit) & 1) != 0
+}
diff --git a/go/mysql/icuregex/internal/ulayout/ulayout.go b/go/mysql/icuregex/internal/ulayout/ulayout.go
new file mode 100644
index 00000000000..dbf21d9460b
--- /dev/null
+++ b/go/mysql/icuregex/internal/ulayout/ulayout.go
@@ -0,0 +1,128 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package ulayout
+
+import (
+ "errors"
+ "sync"
+
+ "vitess.io/vitess/go/mysql/icuregex/internal/icudata"
+ "vitess.io/vitess/go/mysql/icuregex/internal/udata"
+ "vitess.io/vitess/go/mysql/icuregex/internal/utrie"
+)
+
+// Lazily-loaded tries from the ulayout.icu data file: Indic positional
+// category (inpc), Indic syllabic category (insc), vertical orientation (vo).
+var inpcTrie *utrie.UcpTrie
+var inscTrie *utrie.UcpTrie
+var voTrie *utrie.UcpTrie
+
+// Slot numbers in the ulayout.icu index table; each *Top slot records the
+// end offset of the corresponding trie.
+const (
+ ixInpcTrieTop = 1
+ ixInscTrieTop = 2
+ ixVoTrieTop = 3
+
+ ixCount = 12
+)
+
+// InpcTrie returns the Indic positional category trie (nil when the data
+// carries no such trie).
+func InpcTrie() *utrie.UcpTrie {
+ loadLayouts()
+ return inpcTrie
+}
+
+// InscTrie returns the Indic syllabic category trie (nil when absent).
+func InscTrie() *utrie.UcpTrie {
+ loadLayouts()
+ return inscTrie
+}
+
+// VoTrie returns the vertical-orientation trie (nil when absent).
+func VoTrie() *utrie.UcpTrie {
+ loadLayouts()
+ return voTrie
+}
+
+var layoutsOnce sync.Once
+
+// loadLayouts parses icudata.ULayout exactly once; the data is compiled in,
+// so a parse failure is a programming error and panics.
+func loadLayouts() {
+ layoutsOnce.Do(func() {
+ b := udata.NewBytes(icudata.ULayout)
+ if err := readData(b); err != nil {
+ panic(err)
+ }
+ })
+}
+
+// readData parses the ulayout.icu payload: validates the header (dataFormat
+// "Layo", format version 1), reads the index table, then deserializes each
+// of the three tries whose index slot reports a non-trivial size (>= 16
+// bytes), skipping the padding between them.
+func readData(bytes *udata.Bytes) error {
+ err := bytes.ReadHeader(func(info *udata.DataInfo) bool {
+ return info.DataFormat[0] == 0x4c &&
+ info.DataFormat[1] == 0x61 &&
+ info.DataFormat[2] == 0x79 &&
+ info.DataFormat[3] == 0x6f &&
+ info.FormatVersion[0] == 1
+ })
+ if err != nil {
+ return err
+ }
+
+ startPos := bytes.Position()
+ indexesLength := int32(bytes.Uint32()) // inIndexes[IX_INDEXES_LENGTH]
+ if indexesLength < ixCount {
+ return errors.New("text layout properties data: not enough indexes")
+ }
+ index := make([]int32, indexesLength)
+ index[0] = indexesLength
+ // Slot 0 (the length itself) was already consumed; read the rest.
+ for i := int32(1); i < indexesLength; i++ {
+ index[i] = int32(bytes.Uint32())
+ }
+
+ // Each ix*TrieTop slot is the byte offset (from the start of the index
+ // table) of the end of that trie.
+ offset := indexesLength * 4
+ top := index[ixInpcTrieTop]
+ trieSize := top - offset
+ if trieSize >= 16 {
+ inpcTrie, err = utrie.UcpTrieFromBytes(bytes)
+ if err != nil {
+ return err
+ }
+ }
+
+ pos := bytes.Position() - startPos
+ bytes.Skip(top - pos)
+ offset = top
+ top = index[ixInscTrieTop]
+ trieSize = top - offset
+ if trieSize >= 16 {
+ inscTrie, err = utrie.UcpTrieFromBytes(bytes)
+ if err != nil {
+ return err
+ }
+ }
+
+ pos = bytes.Position() - startPos
+ bytes.Skip(top - pos)
+ offset = top
+ top = index[ixVoTrieTop]
+ trieSize = top - offset
+ if trieSize >= 16 {
+ voTrie, err = utrie.UcpTrieFromBytes(bytes)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/go/mysql/icuregex/internal/unames/loader.go b/go/mysql/icuregex/internal/unames/loader.go
new file mode 100644
index 00000000000..296670b1c66
--- /dev/null
+++ b/go/mysql/icuregex/internal/unames/loader.go
@@ -0,0 +1,90 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unames
+
+import (
+ "sync"
+
+ "vitess.io/vitess/go/mysql/icuregex/internal/icudata"
+ "vitess.io/vitess/go/mysql/icuregex/internal/udata"
+)
+
+var charNamesOnce sync.Once
+var charNames *unames
+
+// unames holds the parsed unames.icu tables: the token table with its
+// strings, the per-group name data, and the algorithmic ranges (CJK
+// ideographs, Hangul syllables, ...).
+type unames struct {
+ tokens []uint16
+ tokenStrings []uint8
+ groups []uint16
+ groupNames []uint8
+ algNames []algorithmicRange
+}
+
+// loadCharNames parses icudata.UNames exactly once; the data is compiled
+// in, so a parse failure is a programming error and panics.
+func loadCharNames() {
+ charNamesOnce.Do(func() {
+ b := udata.NewBytes(icudata.UNames)
+ if err := b.ReadHeader(func(info *udata.DataInfo) bool {
+ return info.Size >= 20 &&
+ info.IsBigEndian == 0 &&
+ info.CharsetFamily == 0 &&
+ info.DataFormat[0] == 0x75 && /* dataFormat="unam" */
+ info.DataFormat[1] == 0x6e &&
+ info.DataFormat[2] == 0x61 &&
+ info.DataFormat[3] == 0x6d &&
+ info.FormatVersion[0] == 1
+ }); err != nil {
+ panic(err)
+ }
+
+ // The four leading offsets include the 16 bytes of the offset words
+ // themselves — hence the -16 rebasing to the current cursor.
+ tokenStringOffset := int32(b.Uint32() - 16)
+ groupsOffset := int32(b.Uint32() - 16)
+ groupStringOffset := int32(b.Uint32() - 16)
+ algNamesOffset := int32(b.Uint32() - 16)
+ charNames = &unames{
+ tokens: b.Uint16Slice(tokenStringOffset / 2),
+ tokenStrings: b.Uint8Slice(groupsOffset - tokenStringOffset),
+ groups: b.Uint16Slice((groupStringOffset - groupsOffset) / 2),
+ groupNames: b.Uint8Slice(algNamesOffset - groupStringOffset),
+ }
+
+ algCount := b.Uint32()
+ charNames.algNames = make([]algorithmicRange, 0, algCount)
+
+ // Each algorithmic-range record: start/end code point, type, variant,
+ // a 16-bit record size, then type-specific data (type 0: prefix
+ // string; type 1: factor table followed by factorized strings).
+ for i := uint32(0); i < algCount; i++ {
+ ar := algorithmicRange{
+ start: b.Uint32(),
+ end: b.Uint32(),
+ typ: b.Uint8(),
+ variant: b.Uint8(),
+ }
+ size := b.Uint16()
+ switch ar.typ {
+ case 0:
+ ar.s = b.Uint8Slice(int32(size) - 12)
+ case 1:
+ ar.factors = b.Uint16Slice(int32(ar.variant))
+ ar.s = b.Uint8Slice(int32(size) - 12 - int32(ar.variant)*2)
+ }
+ charNames.algNames = append(charNames.algNames, ar)
+ }
+ })
+}
diff --git a/go/mysql/icuregex/internal/unames/unames.go b/go/mysql/icuregex/internal/unames/unames.go
new file mode 100644
index 00000000000..66e8ba15615
--- /dev/null
+++ b/go/mysql/icuregex/internal/unames/unames.go
@@ -0,0 +1,406 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unames
+
+import (
+ "bytes"
+ "strconv"
+ "strings"
+)
+
+// getGroupName returns the packed name strings starting at the group's
+// 32-bit offset into groupNames.
+func (names *unames) getGroupName(group []uint16) []uint8 {
+ return names.groupNames[names.getGroupOffset(group):]
+}
+
+// NameChoice selects which of a character's names to use (mirrors ICU's
+// UCharNameChoice).
+type NameChoice int32
+
+const (
+ UnicodeCharName NameChoice = iota
+ /**
+ * The Unicode_1_Name property value which is of little practical value.
+ * Beginning with ICU 49, ICU APIs return an empty string for this name choice.
+ * @deprecated ICU 49
+ */
+ Unicode10CharName
+ /** Standard or synthetic character name. @stable ICU 2.0 */
+ ExtendedCharName
+ /** Corrected name from NameAliases.txt. @stable ICU 4.4 */
+ CharNameAlias
+)
+
+// algorithmicRange describes a range of code points whose names are
+// generated rather than stored: typ 0 = prefix + fixed-width hex code,
+// typ 1 = factorized name pieces; s holds the type-specific string data.
+type algorithmicRange struct {
+ start, end uint32
+ typ, variant uint8
+ factors []uint16
+ s []uint8
+}
+
+// findAlgName returns the code point in this algorithmic range whose
+// generated name equals otherName, or -1 if there is no match.
+func (ar *algorithmicRange) findAlgName(otherName string) rune {
+ switch ar.typ {
+ case 0:
+ // Type 0: NUL-terminated prefix in ar.s followed by the code point
+ // as `variant` uppercase hex digits (e.g. CJK UNIFIED IDEOGRAPH-4E00).
+ s := ar.s
+
+ for s[0] != 0 && len(otherName) > 0 {
+ if s[0] != otherName[0] {
+ return -1
+ }
+ s = s[1:]
+ otherName = otherName[1:]
+ }
+
+ var code rune
+ count := int(ar.variant)
+ for i := 0; i < count && len(otherName) > 0; i++ {
+ c := rune(otherName[0])
+ otherName = otherName[1:]
+ if '0' <= c && c <= '9' {
+ code = (code << 4) | (c - '0')
+ } else if 'A' <= c && c <= 'F' {
+ code = (code << 4) | (c - 'A' + 10)
+ } else {
+ return -1
+ }
+ }
+
+ if len(otherName) == 0 && ar.start <= uint32(code) && uint32(code) <= ar.end {
+ return code
+ }
+ case 1:
+ // Type 1: factorized names — a shared prefix, then one string piece
+ // per factor, iterated like a mixed-radix counter over the range.
+ factors := ar.factors
+ s := ar.s
+
+ for s[0] != 0 && len(otherName) > 0 {
+ if s[0] != otherName[0] {
+ return -1
+ }
+ s = s[1:]
+ otherName = otherName[1:]
+ }
+ s = s[1:]
+
+ start := rune(ar.start)
+ limit := rune(ar.end + 1)
+
+ var indexes [8]uint16
+ var buf strings.Builder
+ var elements [8][]byte
+ var elementBases [8][]byte
+
+ ar.writeFactorSuffix0(factors, s, &buf, &elements, &elementBases)
+ if buf.String() == otherName {
+ return start
+ }
+
+ for start+1 < limit {
+ start++
+ i := len(factors)
+
+ // Increment the mixed-radix indexes, advancing each factor's
+ // current element string; carry into the next factor on wrap.
+ for {
+ i--
+ idx := indexes[i] + 1
+ if idx < factors[i] {
+ indexes[i] = idx
+ s = elements[i]
+ s = s[bytes.IndexByte(s, 0)+1:]
+ elements[i] = s
+ break
+ }
+
+ indexes[i] = 0
+ elements[i] = elementBases[i]
+ }
+
+ // Compare the concatenation of the current elements to otherName;
+ // i = 99 is a sentinel marking a mismatch.
+ t := otherName
+ for i = 0; i < len(factors); i++ {
+ s = elements[i]
+
+ for s[0] != 0 && len(t) > 0 {
+ if s[0] != t[0] {
+ s = nil
+ i = 99
+ break
+ }
+ s = s[1:]
+ t = t[1:]
+ }
+ }
+ if i < 99 && len(t) == 0 {
+ return start
+ }
+ }
+ }
+ return -1
+}
+
+// writeFactorSuffix0 writes the first (all-zero-index) factorized name into
+// buf and records, for each factor, the current element string and the base
+// of its element list so findAlgName can iterate the remaining
+// combinations.
+func (ar *algorithmicRange) writeFactorSuffix0(factors []uint16, s []uint8, buf *strings.Builder, elements, elementBases *[8][]byte) {
+ /* write each element */
+ for i := 0; i < len(factors); i++ {
+ (*elements)[i] = s
+ (*elementBases)[i] = s
+
+ nul := bytes.IndexByte(s, 0)
+ buf.Write(s[:nul])
+ s = s[nul+1:]
+
+ // Skip the remaining factors[i]-1 NUL-terminated strings of this
+ // factor's element list to reach the next factor's strings.
+ factor := int(factors[i] - 1)
+ for factor > 0 {
+ s = s[bytes.IndexByte(s, 0)+1:]
+ factor--
+ }
+ }
+}
+
+// CharForName finds the code point with the given name under nameChoice,
+// or -1 if none matches: it tries the <...-XXXX> extended syntax first,
+// then the algorithmic ranges, then the per-group name tables.
+func CharForName(nameChoice NameChoice, name string) rune {
+ loadCharNames()
+
+ lower := strings.ToLower(name)
+ upper := strings.ToUpper(name)
+
+ // NOTE(review): an empty name would panic on lower[0]; presumably
+ // callers never pass "" — confirm.
+ if lower[0] == '<' {
+ // Extended-name syntax, e.g. "<control-0009>": parse the hex after
+ // the last '-'.
+ if nameChoice == ExtendedCharName && lower[len(lower)-1] == '>' {
+ if limit := strings.LastIndexByte(lower, '-'); limit >= 2 {
+ cp, err := strconv.ParseUint(lower[limit+1:len(lower)-1], 16, 32)
+ if err != nil || cp > 0x10ffff {
+ return -1
+ }
+ return rune(cp)
+ }
+ }
+ return -1
+ }
+
+ for _, ar := range charNames.algNames {
+ if cp := ar.findAlgName(upper); cp != -1 {
+ return cp
+ }
+ }
+
+ return charNames.enumNames(0, 0x10ffff+1, upper, nameChoice)
+}
+
+// Names are stored in groups of 32 code points that share the same upper
+// bits (code >> groupShift).
+const groupShift = 5
+const linesPerGroup = 1 << groupShift
+const groupMask = linesPerGroup - 1
+
+// Field indexes within one group record (groupLength uint16s per record).
+const (
+ groupMsb = iota
+ groupOffsetHigh
+ groupOffsetLow
+ groupLength
+)
+
+// enumNames searches code points in [start, limit) for one whose name
+// (under nameChoice) equals otherName; returns the code point or -1.
+// It walks the name groups: a partial start group, the full groups in
+// between, then the end group.
+//
+// The branch enumerating the partial start group was garbled in the
+// previous revision (the text between the `<<` shift and the later `>`
+// comparison had been stripped, collapsing two branches into invalid
+// code); restored per ICU's enumNames.
+func (names *unames) enumNames(start, limit rune, otherName string, nameChoice NameChoice) rune {
+ startGroupMSB := uint16(start >> groupShift)
+ endGroupMSB := uint16((limit - 1) >> groupShift)
+
+ group := names.getGroup(start)
+
+ if startGroupMSB < group[groupMsb] && nameChoice == ExtendedCharName {
+  extLimit := rune(group[groupMsb]) << groupShift
+  if extLimit > limit {
+   extLimit = limit
+  }
+  start = extLimit
+ }
+
+ if startGroupMSB == endGroupMSB {
+  if startGroupMSB == group[groupMsb] {
+   /* start and limit-1 are in the same group: enumerate only there */
+   return names.enumGroupNames(group, start, limit-1, otherName, nameChoice)
+  }
+ } else {
+  if startGroupMSB == group[groupMsb] {
+   if start&groupMask != 0 {
+    /* enumerate the characters in the partial start group */
+    if cp := names.enumGroupNames(group, start, (rune(startGroupMSB)<<groupShift)+groupMask, otherName, nameChoice); cp != -1 {
+     return cp
+    }
+    group = group[groupLength:] /* continue with the next group */
+   }
+  } else if startGroupMSB > group[groupMsb] {
+   /* make sure that we start enumerating with the first group after start */
+   group = group[groupLength:]
+  }
+
+  /* enumerate the entire groups between the start and end groups */
+  for len(group) > 0 && group[groupMsb] < endGroupMSB {
+   start = rune(group[groupMsb]) << groupShift
+   if cp := names.enumGroupNames(group, start, start+linesPerGroup-1, otherName, nameChoice); cp != -1 {
+    return cp
+   }
+   group = group[groupLength:]
+  }
+
+  /* enumerate within the end group (group[groupMsb]==endGroupMSB) */
+  if len(group) > 0 && group[groupMsb] == endGroupMSB {
+   return names.enumGroupNames(group, (limit-1)&^groupMask, limit-1, otherName, nameChoice)
+  }
+ }
+
+ return -1
+}
+
+// getGroup binary-searches the group table for the last group whose msb is
+// <= code's msb, and returns the table from that group onward (callers
+// advance through subsequent groups by slicing off groupLength entries).
+func (names *unames) getGroup(code rune) []uint16 {
+ groups := names.groups
+ groupMSB := uint16(code >> groupShift)
+
+ // groups[0] is the group count; the records follow.
+ start := 0
+ groupCount := int(groups[0])
+ limit := groupCount
+ groups = groups[1:]
+
+ for start < limit-1 {
+  number := (start + limit) / 2
+  if groupMSB < groups[number*groupLength+groupMsb] {
+   limit = number
+  } else {
+   start = number
+  }
+ }
+
+ // Return everything from the found group to the end of the table.
+ // The previous upper bound (groupCount-start)*groupLength shrank as
+ // start grew and sliced out of range (panicking) once start exceeded
+ // groupCount/2.
+ return groups[start*groupLength:]
+}
+
+// getGroupOffset reassembles the group's 32-bit offset into groupNames
+// from its high/low 16-bit fields.
+func (names *unames) getGroupOffset(group []uint16) uint32 {
+ return (uint32(group[groupOffsetHigh]) << 16) | uint32(group[groupOffsetLow])
+}
+
+// enumGroupNames scans the 32 name lines of one group for a code point in
+// [start, end] (both inclusive) whose name equals otherName; returns the
+// matching code point or -1.
+func (names *unames) enumGroupNames(group []uint16, start, end rune, otherName string, choice NameChoice) rune {
+ var offsets [linesPerGroup + 2]uint16
+ var lengths [linesPerGroup + 2]uint16
+
+ s := names.getGroupName(group)
+ s = expandGroupLengths(s, offsets[:0], lengths[:0])
+
+ // end is inclusive (callers pass limit-1 or start+linesPerGroup-1), so
+ // the loop must use <=; the previous `start < end` never checked the
+ // last code point of each group.
+ for start <= end {
+  name := s[offsets[start&groupMask]:]
+  nameLen := lengths[start&groupMask]
+  if names.compareName(name[:nameLen], choice, otherName) {
+   return start
+  }
+  start++
+ }
+ return -1
+}
+
+// expandGroupLengths decodes the nibble-packed lengths of a group's 32 name
+// strings into per-line offsets and lengths (appended into the caller's
+// fixed-capacity arrays) and returns the remainder of s, which begins at
+// the first name string.
+func expandGroupLengths(s []uint8, offsets []uint16, lengths []uint16) []uint8 {
+ /* read the lengths of the 32 strings in this group and get each string's offset */
+ var i, offset, length uint16
+ var lengthByte uint8
+
+ /* all 32 lengths must be read to get the offset of the first group string */
+ for i < linesPerGroup {
+ lengthByte = s[0]
+ s = s[1:]
+
+ /* read even nibble - MSBs of lengthByte */
+ if length >= 12 {
+ /* double-nibble length spread across two bytes */
+ length = ((length&0x3)<<4 | uint16(lengthByte)>>4) + 12
+ lengthByte &= 0xf
+ } else if (lengthByte /* &0xf0 */) >= 0xc0 {
+ /* double-nibble length spread across this one byte */
+ length = (uint16(lengthByte) & 0x3f) + 12
+ } else {
+ /* single-nibble length in MSBs */
+ length = uint16(lengthByte) >> 4
+ lengthByte &= 0xf
+ }
+
+ offsets = append(offsets, offset)
+ lengths = append(lengths, length)
+
+ offset += length
+ i++
+
+ /* read odd nibble - LSBs of lengthByte */
+ if (lengthByte & 0xf0) == 0 {
+ /* this nibble was not consumed for a double-nibble length above */
+ length = uint16(lengthByte)
+ if length < 12 {
+ /* single-nibble length in LSBs */
+ offsets = append(offsets, offset)
+ lengths = append(lengths, length)
+
+ offset += length
+ i++
+ }
+ } else {
+ length = 0 /* prevent double-nibble detection in the next iteration */
+ }
+ }
+
+ /* now, s is at the first group string */
+ return s
+}
+
+// compareName reports whether the tokenized name (one line of a group)
+// expands to exactly otherName. Bytes >= tokenCount are literal characters;
+// a token of -2 introduces a two-byte token; a token of -1 means the byte
+// is literal (';' separates the name fields of a line and ends the
+// comparison); otherwise the token indexes a NUL-terminated expansion in
+// tokenStrings.
+func (names *unames) compareName(name []byte, choice NameChoice, otherName string) bool {
+ tokens := names.tokens
+
+ tokenCount := tokens[0]
+ tokens = tokens[1:]
+
+ otherNameLen := len(otherName)
+
+ for len(name) > 0 && len(otherName) > 0 {
+ c := name[0]
+ name = name[1:]
+
+ if uint16(c) >= tokenCount {
+ if c != ';' {
+ if c != otherName[0] {
+ return false
+ }
+ otherName = otherName[1:]
+ } else {
+ break
+ }
+ } else {
+ token := tokens[c]
+ if int16(token) == -2 {
+ token = tokens[int(c)<<8|int(name[0])]
+ name = name[1:]
+ }
+ if int16(token) == -1 {
+ if c != ';' {
+ if c != otherName[0] {
+ return false
+ }
+ otherName = otherName[1:]
+ } else {
+ // ';' before any character was consumed: for extended names,
+ // skip an empty name field rather than terminating.
+ if len(otherName) == otherNameLen && choice == ExtendedCharName {
+ if ';' >= tokenCount || int16(tokens[';']) == -1 {
+ continue
+ }
+ }
+ break
+ }
+ } else {
+ tokenString := names.tokenStrings[token:]
+ for tokenString[0] != 0 && len(otherName) > 0 {
+ if tokenString[0] != otherName[0] {
+ return false
+ }
+ tokenString = tokenString[1:]
+ otherName = otherName[1:]
+ }
+ }
+ }
+ }
+
+ return len(otherName) == 0
+}
diff --git a/go/mysql/icuregex/internal/unames/unames_test.go b/go/mysql/icuregex/internal/unames/unames_test.go
new file mode 100644
index 00000000000..f15353eef8d
--- /dev/null
+++ b/go/mysql/icuregex/internal/unames/unames_test.go
@@ -0,0 +1,64 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package unames
+
+import (
+ "testing"
+)
+
+func TestCharForName(t *testing.T) {
+ var TestNames = []struct {
+ code rune
+ name, oldName, extName string
+ }{
+ {0x0061, "LATIN SMALL LETTER A", "", "LATIN SMALL LETTER A"},
+ {0x01a2, "LATIN CAPITAL LETTER OI", "", "LATIN CAPITAL LETTER OI"},
+ {0x0284, "LATIN SMALL LETTER DOTLESS J WITH STROKE AND HOOK", "", "LATIN SMALL LETTER DOTLESS J WITH STROKE AND HOOK"},
+ {0x0fd0, "TIBETAN MARK BSKA- SHOG GI MGO RGYAN", "", "TIBETAN MARK BSKA- SHOG GI MGO RGYAN"},
+ {0x3401, "CJK UNIFIED IDEOGRAPH-3401", "", "CJK UNIFIED IDEOGRAPH-3401"},
+ {0x7fed, "CJK UNIFIED IDEOGRAPH-7FED", "", "CJK UNIFIED IDEOGRAPH-7FED"},
+ {0xac00, "HANGUL SYLLABLE GA", "", "HANGUL SYLLABLE GA"},
+ {0xd7a3, "HANGUL SYLLABLE HIH", "", "HANGUL SYLLABLE HIH"},
+ {0xd800, "", "", ""},
+ {0xdc00, "", "", ""},
+ {0xff08, "FULLWIDTH LEFT PARENTHESIS", "", "FULLWIDTH LEFT PARENTHESIS"},
+ {0xffe5, "FULLWIDTH YEN SIGN", "", "FULLWIDTH YEN SIGN"},
+ {0xffff, "", "", ""},
+ {0x1d0c5, "BYZANTINE MUSICAL SYMBOL FHTORA SKLIRON CHROMA VASIS", "", "BYZANTINE MUSICAL SYMBOL FHTORA SKLIRON CHROMA VASIS"},
+ {0x23456, "CJK UNIFIED IDEOGRAPH-23456", "", "CJK UNIFIED IDEOGRAPH-23456"},
+ }
+
+ for _, tn := range TestNames {
+ if tn.name != "" {
+ r := CharForName(UnicodeCharName, tn.name)
+ if r != tn.code {
+ t.Errorf("CharForName(U_UNICODE_CHAR_NAME, %q) = '%c' (U+%04X), expected %c (U+%04X)", tn.name, r, r, tn.code, tn.code)
+ }
+ }
+ if tn.extName != "" {
+ r := CharForName(ExtendedCharName, tn.extName)
+ if r != tn.code {
+ t.Errorf("CharForName(U_EXTENDED_CHAR_NAME, %q) = '%c' (U+%04X), expected %c (U+%04X)", tn.extName, r, r, tn.code, tn.code)
+ }
+ }
+ }
+}
diff --git a/go/mysql/icuregex/internal/uprops/constants.go b/go/mysql/icuregex/internal/uprops/constants.go
new file mode 100644
index 00000000000..4cdf1ef8a0b
--- /dev/null
+++ b/go/mysql/icuregex/internal/uprops/constants.go
@@ -0,0 +1,664 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uprops
+
+type Property int32
+
+const (
+ /*
+ * Note: UProperty constants are parsed by preparseucd.py.
+ * It matches lines like
+ * UCHAR_=,
+ */
+
+ /* Note: Place UCHAR_ALPHABETIC before UCHAR_BINARY_START so that
+ debuggers display UCHAR_ALPHABETIC as the symbolic name for 0,
+ rather than UCHAR_BINARY_START. Likewise for other *_START
+ identifiers. */
+
+ /** Binary property Alphabetic. Same as u_isUAlphabetic, different from u_isalpha.
+ Lu+Ll+Lt+Lm+Lo+Nl+Other_Alphabetic @stable ICU 2.1 */
+ UCharAlphabetic Property = 0
+ /** First constant for binary Unicode properties. @stable ICU 2.1 */
+ UCharBinaryStart = UCharAlphabetic
+ /** Binary property ASCII_Hex_Digit. 0-9 A-F a-f @stable ICU 2.1 */
+ UCharASCIIHexDigit Property = 1
+ /** Binary property Bidi_Control.
+ Format controls which have specific functions
+ in the Bidi Algorithm. @stable ICU 2.1 */
+ UCharBidiControl Property = 2
+ /** Binary property Bidi_Mirrored.
+ Characters that may change display in RTL text.
+ Same as u_isMirrored.
+ See Bidi Algorithm, UTR 9. @stable ICU 2.1 */
+ UCharBidiMirrored Property = 3
+ /** Binary property Dash. Variations of dashes. @stable ICU 2.1 */
+ UCharDash Property = 4
+ /** Binary property Default_Ignorable_Code_Point (new in Unicode 3.2).
+ Ignorable in most processing.
+ <2060..206F, FFF0..FFFB, E0000..E0FFF>+Other_Default_Ignorable_Code_Point+(Cf+Cc+Cs-White_Space) @stable ICU 2.1 */
+ UCharDefaultIgnorableCodePoint Property = 5
+ /** Binary property Deprecated (new in Unicode 3.2).
+ The usage of deprecated characters is strongly discouraged. @stable ICU 2.1 */
+ UCharDeprecated Property = 6
+ /** Binary property Diacritic. Characters that linguistically modify
+ the meaning of another character to which they apply. @stable ICU 2.1 */
+ UCharDiacritic Property = 7
+ /** Binary property Extender.
+ Extend the value or shape of a preceding alphabetic character,
+ e.g., length and iteration marks. @stable ICU 2.1 */
+ UCharExtender Property = 8
+ /** Binary property Full_Composition_Exclusion.
+ CompositionExclusions.txt+Singleton Decompositions+
+ Non-Starter Decompositions. @stable ICU 2.1 */
+ UCharFullCompositionExclusion Property = 9
+ /** Binary property Grapheme_Base (new in Unicode 3.2).
+ For programmatic determination of grapheme cluster boundaries.
+ [0..10FFFF]-Cc-Cf-Cs-Co-Cn-Zl-Zp-Grapheme_Link-Grapheme_Extend-CGJ @stable ICU 2.1 */
+ UCharGraphemeBase Property = 10
+ /** Binary property Grapheme_Extend (new in Unicode 3.2).
+ For programmatic determination of grapheme cluster boundaries.
+ Me+Mn+Mc+Other_Grapheme_Extend-Grapheme_Link-CGJ @stable ICU 2.1 */
+ UCharGraphemeExtend Property = 11
+ /** Binary property Grapheme_Link (new in Unicode 3.2).
+ For programmatic determination of grapheme cluster boundaries. @stable ICU 2.1 */
+ UCharGraphemeLink Property = 12
+ /** Binary property Hex_Digit.
+ Characters commonly used for hexadecimal numbers. @stable ICU 2.1 */
+ UCharHexDigit Property = 13
+ /** Binary property Hyphen. Dashes used to mark connections
+ between pieces of words, plus the Katakana middle dot. @stable ICU 2.1 */
+ UCharHyphen Property = 14
+ /** Binary property ID_Continue.
+ Characters that can continue an identifier.
+ DerivedCoreProperties.txt also says "NOTE: Cf characters should be filtered out."
+ ID_Start+Mn+Mc+Nd+Pc @stable ICU 2.1 */
+ UCharIDContinue Property = 15
+ /** Binary property ID_Start.
+ Characters that can start an identifier.
+ Lu+Ll+Lt+Lm+Lo+Nl @stable ICU 2.1 */
+ UCharIDStart Property = 16
+ /** Binary property Ideographic.
+ CJKV ideographs. @stable ICU 2.1 */
+ UCharIdeographic Property = 17
+ /** Binary property IDS_Binary_Operator (new in Unicode 3.2).
+ For programmatic determination of
+ Ideographic Description Sequences. @stable ICU 2.1 */
+ UCharIdsBinaryOperator Property = 18
+ /** Binary property IDS_Trinary_Operator (new in Unicode 3.2).
+ For programmatic determination of
+ Ideographic Description Sequences. @stable ICU 2.1 */
+ UCharIdsTrinaryOperator Property = 19
+ /** Binary property Join_Control.
+ Format controls for cursive joining and ligation. @stable ICU 2.1 */
+ UCharJoinControl Property = 20
+ /** Binary property Logical_Order_Exception (new in Unicode 3.2).
+ Characters that do not use logical order and
+ require special handling in most processing. @stable ICU 2.1 */
+ UCharLogicalOrderException Property = 21
+ /** Binary property Lowercase. Same as u_isULowercase, different from u_islower.
+ Ll+Other_Lowercase @stable ICU 2.1 */
+ UCharLowercase Property = 22
+ /** Binary property Math. Sm+Other_Math @stable ICU 2.1 */
+ UCharMath Property = 23
+ /** Binary property Noncharacter_Code_Point.
+ Code points that are explicitly defined as illegal
+ for the encoding of characters. @stable ICU 2.1 */
+ UCharNoncharacterCodePoint Property = 24
+ /** Binary property Quotation_Mark. @stable ICU 2.1 */
+ UCharQuotationMark Property = 25
+ /** Binary property Radical (new in Unicode 3.2).
+ For programmatic determination of
+ Ideographic Description Sequences. @stable ICU 2.1 */
+ UCharRadical Property = 26
+ /** Binary property Soft_Dotted (new in Unicode 3.2).
+ Characters with a "soft dot", like i or j.
+ An accent placed on these characters causes
+ the dot to disappear. @stable ICU 2.1 */
+ UCharSoftDotted Property = 27
+ /** Binary property Terminal_Punctuation.
+ Punctuation characters that generally mark
+ the end of textual units. @stable ICU 2.1 */
+ UCharTerminalPunctuation Property = 28
+ /** Binary property Unified_Ideograph (new in Unicode 3.2).
+ For programmatic determination of
+ Ideographic Description Sequences. @stable ICU 2.1 */
+ UCharUnifiedIdeograph Property = 29
+ /** Binary property Uppercase. Same as u_isUUppercase, different from u_isupper.
+ Lu+Other_Uppercase @stable ICU 2.1 */
+ UCharUppercase Property = 30
+ /** Binary property White_Space.
+ Same as u_isUWhiteSpace, different from u_isspace and u_isWhitespace.
+ Space characters+TAB+CR+LF-ZWSP-ZWNBSP @stable ICU 2.1 */
+ UCharWhiteSpace Property = 31
+ /** Binary property XID_Continue.
+ ID_Continue modified to allow closure under
+ normalization forms NFKC and NFKD. @stable ICU 2.1 */
+ UCharXidContinue Property = 32
+ /** Binary property XID_Start. ID_Start modified to allow
+ closure under normalization forms NFKC and NFKD. @stable ICU 2.1 */
+ UCharXidStart Property = 33
+ /** Binary property Case_Sensitive. Either the source of a case
+ mapping or _in_ the target of a case mapping. Not the same as
+ the general category Cased_Letter. @stable ICU 2.6 */
+ UCharCaseSensitive Property = 34
+ /** Binary property STerm (new in Unicode 4.0.1).
+ Sentence Terminal. Used in UAX #29: Text Boundaries
+ (http://www.unicode.org/reports/tr29/)
+ @stable ICU 3.0 */
+ UCharSTerm Property = 35
+ /** Binary property Variation_Selector (new in Unicode 4.0.1).
+ Indicates all those characters that qualify as Variation Selectors.
+ For details on the behavior of these characters,
+ see StandardizedVariants.html and 15.6 Variation Selectors.
+ @stable ICU 3.0 */
+ UCharVariationSelector Property = 36
+ /** Binary property NFD_Inert.
+ ICU-specific property for characters that are inert under NFD,
+ i.e., they do not interact with adjacent characters.
+ See the documentation for the Normalizer2 class and the
+ Normalizer2::isInert() method.
+ @stable ICU 3.0 */
+ UCharNfdInert Property = 37
+ /** Binary property NFKD_Inert.
+ ICU-specific property for characters that are inert under NFKD,
+ i.e., they do not interact with adjacent characters.
+ See the documentation for the Normalizer2 class and the
+ Normalizer2::isInert() method.
+ @stable ICU 3.0 */
+ UCharNfkdInert Property = 38
+ /** Binary property NFC_Inert.
+ ICU-specific property for characters that are inert under NFC,
+ i.e., they do not interact with adjacent characters.
+ See the documentation for the Normalizer2 class and the
+ Normalizer2::isInert() method.
+ @stable ICU 3.0 */
+ UCharNfcInert Property = 39
+ /** Binary property NFKC_Inert.
+ ICU-specific property for characters that are inert under NFKC,
+ i.e., they do not interact with adjacent characters.
+ See the documentation for the Normalizer2 class and the
+ Normalizer2::isInert() method.
+ @stable ICU 3.0 */
+ UCharNfkcInert Property = 40
+ /** Binary Property Segment_Starter.
+ ICU-specific property for characters that are starters in terms of
+ Unicode normalization and combining character sequences.
+ They have ccc=0 and do not occur in non-initial position of the
+ canonical decomposition of any character
+ (like a-umlaut in NFD and a Jamo T in an NFD(Hangul LVT)).
+ ICU uses this property for segmenting a string for generating a set of
+ canonically equivalent strings, e.g. for canonical closure while
+ processing collation tailoring rules.
+ @stable ICU 3.0 */
+ UCharSegmentStarter Property = 41
+ /** Binary property Pattern_Syntax (new in Unicode 4.1).
+ See UAX #31 Identifier and Pattern Syntax
+ (http://www.unicode.org/reports/tr31/)
+ @stable ICU 3.4 */
+ UCharPatternSyntax Property = 42
+ /** Binary property Pattern_White_Space (new in Unicode 4.1).
+ See UAX #31 Identifier and Pattern Syntax
+ (http://www.unicode.org/reports/tr31/)
+ @stable ICU 3.4 */
+ UCharPatternWhiteSpace Property = 43
+ /** Binary property alnum (a C/POSIX character class).
+ Implemented according to the UTS #18 Annex C Standard Recommendation.
+ See the uchar.h file documentation.
+ @stable ICU 3.4 */
+ UCharPosixAlnum Property = 44
+ /** Binary property blank (a C/POSIX character class).
+ Implemented according to the UTS #18 Annex C Standard Recommendation.
+ See the uchar.h file documentation.
+ @stable ICU 3.4 */
+ UCharPosixBlank Property = 45
+ /** Binary property graph (a C/POSIX character class).
+ Implemented according to the UTS #18 Annex C Standard Recommendation.
+ See the uchar.h file documentation.
+ @stable ICU 3.4 */
+ UCharPosixGraph Property = 46
+ /** Binary property print (a C/POSIX character class).
+ Implemented according to the UTS #18 Annex C Standard Recommendation.
+ See the uchar.h file documentation.
+ @stable ICU 3.4 */
+ UCharPosixPrint Property = 47
+ /** Binary property xdigit (a C/POSIX character class).
+ Implemented according to the UTS #18 Annex C Standard Recommendation.
+ See the uchar.h file documentation.
+ @stable ICU 3.4 */
+ UCharPosixXdigit Property = 48
+ /** Binary property Cased. For Lowercase, Uppercase and Titlecase characters. @stable ICU 4.4 */
+ UCharCased Property = 49
+ /** Binary property Case_Ignorable. Used in context-sensitive case mappings. @stable ICU 4.4 */
+ UCharCaseIgnorable Property = 50
+ /** Binary property Changes_When_Lowercased. @stable ICU 4.4 */
+ UCharChangesWhenLowercased Property = 51
+ /** Binary property Changes_When_Uppercased. @stable ICU 4.4 */
+ UCharChangesWhenUppercased Property = 52
+ /** Binary property Changes_When_Titlecased. @stable ICU 4.4 */
+ UCharChangesWhenTitlecased Property = 53
+ /** Binary property Changes_When_Casefolded. @stable ICU 4.4 */
+ UCharChangesWhenCasefolded Property = 54
+ /** Binary property Changes_When_Casemapped. @stable ICU 4.4 */
+ UCharChangesWhenCasemapped Property = 55
+ /** Binary property Changes_When_NFKC_Casefolded. @stable ICU 4.4 */
+ UCharChangesWhenNfkcCasefolded Property = 56
+ /**
+ * Binary property Emoji.
+ * See http://www.unicode.org/reports/tr51/#Emoji_Properties
+ *
+ * @stable ICU 57
+ */
+ UCharEmoji Property = 57
+ /**
+ * Binary property Emoji_Presentation.
+ * See http://www.unicode.org/reports/tr51/#Emoji_Properties
+ *
+ * @stable ICU 57
+ */
+ UCharEmojiPresentation Property = 58
+ /**
+ * Binary property Emoji_Modifier.
+ * See http://www.unicode.org/reports/tr51/#Emoji_Properties
+ *
+ * @stable ICU 57
+ */
+ UCharEmojiModifier Property = 59
+ /**
+ * Binary property Emoji_Modifier_Base.
+ * See http://www.unicode.org/reports/tr51/#Emoji_Properties
+ *
+ * @stable ICU 57
+ */
+ UCharEmojiModifierBase Property = 60
+ /**
+ * Binary property Emoji_Component.
+ * See http://www.unicode.org/reports/tr51/#Emoji_Properties
+ *
+ * @stable ICU 60
+ */
+ UCharEmojiComponent Property = 61
+ /**
+ * Binary property Regional_Indicator.
+ * @stable ICU 60
+ */
+ UCharRegionalIndicator Property = 62
+ /**
+ * Binary property Prepended_Concatenation_Mark.
+ * @stable ICU 60
+ */
+ UCharPrependedConcatenationMark Property = 63
+ /**
+ * Binary property Extended_Pictographic.
+ * See http://www.unicode.org/reports/tr51/#Emoji_Properties
+ *
+ * @stable ICU 62
+ */
+ UCharExtendedPictographic Property = 64
+
+ /**
+ * Binary property of strings Basic_Emoji.
+ * See https://www.unicode.org/reports/tr51/#Emoji_Sets
+ *
+ * @stable ICU 70
+ */
+ UCharBasicEmoji Property = 65
+ /**
+ * Binary property of strings Emoji_Keycap_Sequence.
+ * See https://www.unicode.org/reports/tr51/#Emoji_Sets
+ *
+ * @stable ICU 70
+ */
+ UCharEmojiKeycapSequence Property = 66
+ /**
+ * Binary property of strings RGI_Emoji_Modifier_Sequence.
+ * See https://www.unicode.org/reports/tr51/#Emoji_Sets
+ *
+ * @stable ICU 70
+ */
+ UCharRgiEmojiModifierSequence Property = 67
+ /**
+ * Binary property of strings RGI_Emoji_Flag_Sequence.
+ * See https://www.unicode.org/reports/tr51/#Emoji_Sets
+ *
+ * @stable ICU 70
+ */
+ UCharRgiEmojiFlagSequence Property = 68
+ /**
+ * Binary property of strings RGI_Emoji_Tag_Sequence.
+ * See https://www.unicode.org/reports/tr51/#Emoji_Sets
+ *
+ * @stable ICU 70
+ */
+ UCharRgiEmojiTagSequence Property = 69
+ /**
+ * Binary property of strings RGI_Emoji_ZWJ_Sequence.
+ * See https://www.unicode.org/reports/tr51/#Emoji_Sets
+ *
+ * @stable ICU 70
+ */
+ UCharRgiEmojiZwjSequence Property = 70
+ /**
+ * Binary property of strings RGI_Emoji.
+ * See https://www.unicode.org/reports/tr51/#Emoji_Sets
+ *
+ * @stable ICU 70
+ */
+ UCharRgiEmoji Property = 71
+
+ /** Enumerated property Bidi_Class.
+ Same as u_charDirection, returns UCharDirection values. @stable ICU 2.2 */
+ UCharBidiClass Property = 0x1000
+ /** First constant for enumerated/integer Unicode properties. @stable ICU 2.2 */
+ UCharIntStart = UCharBidiClass
+ /** Enumerated property Block.
+ Same as ublock_getCode, returns UBlockCode values. @stable ICU 2.2 */
+ UCharBlock Property = 0x1001
+ /** Enumerated property Canonical_Combining_Class.
+ Same as u_getCombiningClass, returns 8-bit numeric values. @stable ICU 2.2 */
+ UCharCanonicalCombiningClass Property = 0x1002
+ /** Enumerated property Decomposition_Type.
+ Returns UDecompositionType values. @stable ICU 2.2 */
+ UCharDecompositionType Property = 0x1003
+ /** Enumerated property East_Asian_Width.
+ See http://www.unicode.org/reports/tr11/
+ Returns UEastAsianWidth values. @stable ICU 2.2 */
+ UCharEastAsianWidth Property = 0x1004
+ /** Enumerated property General_Category.
+ Same as u_charType, returns UCharCategory values. @stable ICU 2.2 */
+ UCharGeneralCategory Property = 0x1005
+ /** Enumerated property Joining_Group.
+ Returns UJoiningGroup values. @stable ICU 2.2 */
+ UCharJoiningGroup Property = 0x1006
+ /** Enumerated property Joining_Type.
+ Returns UJoiningType values. @stable ICU 2.2 */
+ UCharJoiningType Property = 0x1007
+ /** Enumerated property Line_Break.
+ Returns ULineBreak values. @stable ICU 2.2 */
+ UCharLineBreak Property = 0x1008
+ /** Enumerated property Numeric_Type.
+ Returns UNumericType values. @stable ICU 2.2 */
+ UCharNumericType Property = 0x1009
+ /** Enumerated property Script.
+ Same as uscript_getScript, returns UScriptCode values. @stable ICU 2.2 */
+ UCharScript Property = 0x100A
+ /** Enumerated property Hangul_Syllable_Type, new in Unicode 4.
+ Returns UHangulSyllableType values. @stable ICU 2.6 */
+ UCharHangulSyllableType Property = 0x100B
+ /** Enumerated property NFD_Quick_Check.
+ Returns UNormalizationCheckResult values. @stable ICU 3.0 */
+ UCharNfdQuickCheck Property = 0x100C
+ /** Enumerated property NFKD_Quick_Check.
+ Returns UNormalizationCheckResult values. @stable ICU 3.0 */
+ UCharNfkdQuickCheck Property = 0x100D
+ /** Enumerated property NFC_Quick_Check.
+ Returns UNormalizationCheckResult values. @stable ICU 3.0 */
+ UCharNfcQuickCheck Property = 0x100E
+ /** Enumerated property NFKC_Quick_Check.
+ Returns UNormalizationCheckResult values. @stable ICU 3.0 */
+ UCharNfkcQuickCheck Property = 0x100F
+ /** Enumerated property Lead_Canonical_Combining_Class.
+ ICU-specific property for the ccc of the first code point
+ of the decomposition, or lccc(c)=ccc(NFD(c)[0]).
+ Useful for checking for canonically ordered text;
+ see UNORM_FCD and http://www.unicode.org/notes/tn5/#FCD .
+ Returns 8-bit numeric values like UCHAR_CANONICAL_COMBINING_CLASS. @stable ICU 3.0 */
+ UCharLeadCanonicalCombiningClass Property = 0x1010
+ /** Enumerated property Trail_Canonical_Combining_Class.
+ ICU-specific property for the ccc of the last code point
+ of the decomposition, or tccc(c)=ccc(NFD(c)[last]).
+ Useful for checking for canonically ordered text;
+ see UNORM_FCD and http://www.unicode.org/notes/tn5/#FCD .
+ Returns 8-bit numeric values like UCHAR_CANONICAL_COMBINING_CLASS. @stable ICU 3.0 */
+ UCharTrailCanonicalCombiningClass Property = 0x1011
+ /** Enumerated property Grapheme_Cluster_Break (new in Unicode 4.1).
+ Used in UAX #29: Text Boundaries
+ (http://www.unicode.org/reports/tr29/)
+ Returns UGraphemeClusterBreak values. @stable ICU 3.4 */
+ UCharGraphemeClusterBreak Property = 0x1012
+ /** Enumerated property Sentence_Break (new in Unicode 4.1).
+ Used in UAX #29: Text Boundaries
+ (http://www.unicode.org/reports/tr29/)
+ Returns USentenceBreak values. @stable ICU 3.4 */
+ UCharSentenceBreak Property = 0x1013
+ /** Enumerated property Word_Break (new in Unicode 4.1).
+ Used in UAX #29: Text Boundaries
+ (http://www.unicode.org/reports/tr29/)
+ Returns UWordBreakValues values. @stable ICU 3.4 */
+ UCharWordBreak Property = 0x1014
+ /** Enumerated property Bidi_Paired_Bracket_Type (new in Unicode 6.3).
+ Used in UAX #9: Unicode Bidirectional Algorithm
+ (http://www.unicode.org/reports/tr9/)
+ Returns UBidiPairedBracketType values. @stable ICU 52 */
+ UCharBidiPairedBracketType Property = 0x1015
+ /**
+ * Enumerated property Indic_Positional_Category.
+ * New in Unicode 6.0 as provisional property Indic_Matra_Category;
+ * renamed and changed to informative in Unicode 8.0.
+ * See http://www.unicode.org/reports/tr44/#IndicPositionalCategory.txt
+ * @stable ICU 63
+ */
+ UCharIndicPositionalCategory Property = 0x1016
+ /**
+ * Enumerated property Indic_Syllabic_Category.
+ * New in Unicode 6.0 as provisional; informative since Unicode 8.0.
+ * See http://www.unicode.org/reports/tr44/#IndicSyllabicCategory.txt
+ * @stable ICU 63
+ */
+ UCharIndicSyllableCategory Property = 0x1017
+ /**
+ * Enumerated property Vertical_Orientation.
+ * Used for UAX #50 Unicode Vertical Text Layout (https://www.unicode.org/reports/tr50/).
+ * New as a UCD property in Unicode 10.0.
+ * @stable ICU 63
+ */
+ UCharVerticalOrientation Property = 0x1018
+
+ /** Bitmask property General_Category_Mask.
+ This is the General_Category property returned as a bit mask.
+ When used in u_getIntPropertyValue(c), same as U_MASK(u_charType(c)),
+ returns bit masks for UCharCategory values where exactly one bit is set.
+ When used with u_getPropertyValueName() and u_getPropertyValueEnum(),
+ a multi-bit mask is used for sets of categories like "Letters".
+ Mask values should be cast to uint32_t.
+ @stable ICU 2.4 */
+ UCharGeneralCategoryMask Property = 0x2000
+ /** First constant for bit-mask Unicode properties. @stable ICU 2.4 */
+ UCharMaskStart = UCharGeneralCategoryMask
+ /** Double property Numeric_Value.
+ Corresponds to u_getNumericValue. @stable ICU 2.4 */
+ UCharNumericValue Property = 0x3000
+ /** First constant for double Unicode properties. @stable ICU 2.4 */
+ UCharDoubleStart = UCharNumericValue
+ /** String property Age.
+ Corresponds to u_charAge. @stable ICU 2.4 */
+ UCharAge Property = 0x4000
+ /** First constant for string Unicode properties. @stable ICU 2.4 */
+ UCharStringStart = UCharAge
+ /** String property Bidi_Mirroring_Glyph.
+ Corresponds to u_charMirror. @stable ICU 2.4 */
+ UCharBidiMirroringGlyph Property = 0x4001
+ /** String property Case_Folding.
+ Corresponds to u_strFoldCase in ustring.h. @stable ICU 2.4 */
+ UCharCaseFolding Property = 0x4002
+ /** String property Lowercase_Mapping.
+ Corresponds to u_strToLower in ustring.h. @stable ICU 2.4 */
+ UCharLowercaseMapping Property = 0x4004
+ /** String property Name.
+ Corresponds to u_charName. @stable ICU 2.4 */
+ UCharName Property = 0x4005
+ /** String property Simple_Case_Folding.
+ Corresponds to u_foldCase. @stable ICU 2.4 */
+ UCharSimpleCaseFolding Property = 0x4006
+ /** String property Simple_Lowercase_Mapping.
+ Corresponds to u_tolower. @stable ICU 2.4 */
+ UCharSimpleLowercaseMapping Property = 0x4007
+ /** String property Simple_Titlecase_Mapping.
+ Corresponds to u_totitle. @stable ICU 2.4 */
+ UcharSimpleTitlecaseMapping Property = 0x4008
+ /** String property Simple_Uppercase_Mapping.
+ Corresponds to u_toupper. @stable ICU 2.4 */
+ UCharSimpleUppercaseMapping Property = 0x4009
+ /** String property Titlecase_Mapping.
+ Corresponds to u_strToTitle in ustring.h. @stable ICU 2.4 */
+ UCharTitlecaseMapping Property = 0x400A
+ /** String property Uppercase_Mapping.
+ Corresponds to u_strToUpper in ustring.h. @stable ICU 2.4 */
+ UCharUppercaseMapping Property = 0x400C
+ /** String property Bidi_Paired_Bracket (new in Unicode 6.3).
+ Corresponds to u_getBidiPairedBracket. @stable ICU 52 */
+ UCharBidiPairedBracket Property = 0x400D
+
+ /** Miscellaneous property Script_Extensions (new in Unicode 6.0).
+ Some characters are commonly used in multiple scripts.
+ For more information, see UAX #24: http://www.unicode.org/reports/tr24/.
+ Corresponds to uscript_hasScript and uscript_getScriptExtensions in uscript.h.
+ @stable ICU 4.6 */
+ UCharScriptExtensions Property = 0x7000
+ /** First constant for Unicode properties with unusual value types. @stable ICU 4.6 */
+ UCharOtherPropertyStart = UCharScriptExtensions
+
+ /** Represents a nonexistent or invalid property or property value. @stable ICU 2.4 */
+ UCharInvalidCode Property = -1
+)
+
+const (
+ uCharBinaryLimit = 72
+ uCharIntLimit = 0x1019
+ uCharMaskLimit = 0x2001
+ uCharStringLimit = 0x400E
+)
+
+/*
+ * Properties in vector word 1
+ * Each bit encodes one binary property.
+ * The following constants represent the bit number, use 1<= 0 {
+ set.AddRuneRange(startHasProperty, c-1)
+ startHasProperty = -1
+ }
+ }
+ }
+ if startHasProperty >= 0 {
+ set.AddRuneRange(startHasProperty, uset.MaxValue)
+ }
+
+ inclusionsForProperty[prop] = set
+ return set, nil
+}
+
+func getInclusionsForIntProperty(prop Property) (*uset.UnicodeSet, error) {
+ if inc, ok := inclusionsForProperty[prop]; ok {
+ return inc, nil
+ }
+
+ src := prop.source()
+ incl, err := getInclusionsForSource(src)
+ if err != nil {
+ return nil, err
+ }
+
+ intPropIncl := uset.New()
+ intPropIncl.AddRune(0)
+
+ numRanges := incl.RangeCount()
+ prevValue := int32(0)
+
+ for i := 0; i < numRanges; i++ {
+ rangeEnd := incl.RangeEnd(i)
+ for c := incl.RangeStart(i); c <= rangeEnd; c++ {
+ value := getIntPropertyValue(c, prop)
+ if value != prevValue {
+ intPropIncl.AddRune(c)
+ prevValue = value
+ }
+ }
+ }
+
+ inclusionsForProperty[prop] = intPropIncl
+ return intPropIncl, nil
+}
+
+func ApplyIntPropertyValue(u *uset.UnicodeSet, prop Property, value int32) error {
+ switch {
+ case prop == UCharGeneralCategoryMask:
+ inclusions, err := getInclusionsForProperty(prop)
+ if err != nil {
+ return err
+ }
+ u.ApplyFilter(inclusions, func(ch rune) bool {
+ return (uchar.Mask(uchar.CharType(ch)) & uint32(value)) != 0
+ })
+ case prop == UCharScriptExtensions:
+ inclusions, err := getInclusionsForProperty(prop)
+ if err != nil {
+ return err
+ }
+ u.ApplyFilter(inclusions, func(ch rune) bool {
+ return uscriptHasScript(ch, code(value))
+ })
+ case 0 <= prop && prop < uCharBinaryLimit:
+ if value == 0 || value == 1 {
+ set, err := getInclusionsForBinaryProperty(prop)
+ if err != nil {
+ return err
+ }
+ u.CopyFrom(set)
+ if value == 0 {
+ u.Complement()
+ }
+ } else {
+ u.Clear()
+ }
+
+ case UCharIntStart <= prop && prop < uCharIntLimit:
+ inclusions, err := getInclusionsForProperty(prop)
+ if err != nil {
+ return err
+ }
+ u.ApplyFilter(inclusions, func(ch rune) bool {
+ return getIntPropertyValue(ch, prop) == value
+ })
+ default:
+ return errors.ErrUnsupported
+ }
+ return nil
+}
+
+func mungeCharName(charname string) string {
+ out := make([]byte, 0, len(charname))
+ for _, ch := range []byte(charname) {
+ j := len(out)
+ if ch == ' ' && (j == 0 || out[j-1] == ' ') {
+ continue
+ }
+ out = append(out, ch)
+ }
+ return string(out)
+}
+
+func ApplyPropertyPattern(u *uset.UnicodeSet, pat string) error {
+ if len(pat) < 5 {
+ return errors.ErrIllegalArgument
+ }
+
+ var posix, isName, invert bool
+
+ if isPOSIXOpen(pat) {
+ posix = true
+ pat = pattern.SkipWhitespace(pat[2:])
+ if len(pat) > 0 && pat[0] == '^' {
+ pat = pat[1:]
+ invert = true
+ }
+ } else if isPerlOpen(pat) || isNameOpen(pat) {
+ c := pat[1]
+ invert = c == 'P'
+ isName = c == 'N'
+ pat = pattern.SkipWhitespace(pat[2:])
+ if len(pat) == 0 || pat[0] != '{' {
+ return errors.ErrIllegalArgument
+ }
+ pat = pat[1:]
+ } else {
+ return errors.ErrIllegalArgument
+ }
+
+ var closePos int
+ if posix {
+ closePos = strings.Index(pat, ":]")
+ } else {
+ closePos = strings.IndexByte(pat, '}')
+ }
+ if closePos < 0 {
+ return errors.ErrIllegalArgument
+ }
+
+ equals := strings.IndexByte(pat, '=')
+ var propName, valueName string
+ if equals >= 0 && equals < closePos && !isName {
+ propName = pat[:equals]
+ valueName = pat[equals+1 : closePos]
+ } else {
+ propName = pat[:closePos]
+ if isName {
+ valueName = propName
+ propName = "na"
+ }
+ }
+
+ if err := ApplyPropertyAlias(u, propName, valueName); err != nil {
+ return err
+ }
+ if invert {
+ u.Complement()
+ }
+ return nil
+}
+
+func isPOSIXOpen(pattern string) bool {
+ return pattern[0] == '[' && pattern[1] == ':'
+}
+
+func isNameOpen(pattern string) bool {
+ return pattern[0] == '\\' && pattern[1] == 'N'
+}
+
+func isPerlOpen(pattern string) bool {
+ return pattern[0] == '\\' && (pattern[1] == 'p' || pattern[1] == 'P')
+}
+
+func ApplyPropertyAlias(u *uset.UnicodeSet, prop, value string) error {
+ var p Property
+ var v int32
+ var invert bool
+
+ if len(value) > 0 {
+ p = getPropertyEnum(prop)
+ if p == -1 {
+ return errors.ErrIllegalArgument
+ }
+ if p == UCharGeneralCategory {
+ p = UCharGeneralCategoryMask
+ }
+
+ if (p >= UCharBinaryStart && p < uCharBinaryLimit) ||
+ (p >= UCharIntStart && p < uCharIntLimit) ||
+ (p >= UCharMaskStart && p < uCharMaskLimit) {
+ v = getPropertyValueEnum(p, value)
+ if v == -1 {
+ // Handle numeric CCC
+ if p == UCharCanonicalCombiningClass ||
+ p == UCharTrailCanonicalCombiningClass ||
+ p == UCharLeadCanonicalCombiningClass {
+ val, err := strconv.ParseUint(value, 10, 8)
+ if err != nil {
+ return errors.ErrIllegalArgument
+ }
+ v = int32(val)
+ } else {
+ return errors.ErrIllegalArgument
+ }
+ }
+ } else {
+ switch p {
+ case UCharNumericValue:
+ val, err := strconv.ParseFloat(value, 64)
+ if err != nil {
+ return errors.ErrIllegalArgument
+ }
+ incl, err := getInclusionsForProperty(p)
+ if err != nil {
+ return err
+ }
+ u.ApplyFilter(incl, func(ch rune) bool {
+ return uchar.NumericValue(ch) == val
+ })
+ return nil
+ case UCharName:
+ // Must munge name, since u_charFromName() does not do
+ // 'loose' matching.
+ charName := mungeCharName(value)
+ ch := unames.CharForName(unames.ExtendedCharName, charName)
+ if ch < 0 {
+ return errors.ErrIllegalArgument
+ }
+ u.Clear()
+ u.AddRune(ch)
+ return nil
+ case UCharAge:
+ // Must munge name, since u_versionFromString() does not do
+ // 'loose' matching.
+ charName := mungeCharName(value)
+ version := uchar.VersionFromString(charName)
+ incl, err := getInclusionsForProperty(p)
+ if err != nil {
+ return err
+ }
+ u.ApplyFilter(incl, func(ch rune) bool {
+ return uchar.CharAge(ch) == version
+ })
+ return nil
+ case UCharScriptExtensions:
+ v = getPropertyValueEnum(UCharScript, value)
+ if v == -1 {
+ return errors.ErrIllegalArgument
+ }
+ default:
+ // p is a non-binary, non-enumerated property that we
+ // don't support (yet).
+ return errors.ErrIllegalArgument
+ }
+ }
+ } else {
+ // value is empty. Interpret as General Category, Script, or
+ // Binary property.
+ p = UCharGeneralCategoryMask
+ v = getPropertyValueEnum(p, prop)
+ if v == -1 {
+ p = UCharScript
+ v = getPropertyValueEnum(p, prop)
+ if v == -1 {
+ p = getPropertyEnum(prop)
+ if p >= UCharBinaryStart && p < uCharBinaryLimit {
+ v = 1
+ } else if 0 == comparePropertyNames("ANY", prop) {
+ u.Clear()
+ u.AddRuneRange(uset.MinValue, uset.MaxValue)
+ return nil
+ } else if 0 == comparePropertyNames("ASCII", prop) {
+ u.Clear()
+ u.AddRuneRange(0, 0x7F)
+ return nil
+ } else if 0 == comparePropertyNames("Assigned", prop) {
+ // [:Assigned:]=[:^Cn:]
+ p = UCharGeneralCategoryMask
+ v = int32(uchar.GcCnMask)
+ invert = true
+ } else {
+ return errors.ErrIllegalArgument
+ }
+ }
+ }
+ }
+
+ err := ApplyIntPropertyValue(u, p, v)
+ if err != nil {
+ return err
+ }
+ if invert {
+ u.Complement()
+ }
+ return nil
+}
+
+func AddULayoutPropertyStarts(src propertySource, u *uset.UnicodeSet) {
+ var trie *utrie.UcpTrie
+ switch src {
+ case srcInpc:
+ trie = ulayout.InpcTrie()
+ case srcInsc:
+ trie = ulayout.InscTrie()
+ case srcVo:
+ trie = ulayout.VoTrie()
+ default:
+ panic("unreachable")
+ }
+
+ // Add the start code point of each same-value range of the trie.
+ var start, end rune
+ for {
+ end, _ = trie.GetRange(start, utrie.UcpMapRangeNormal, 0, nil)
+ if end < 0 {
+ break
+ }
+ u.AddRune(start)
+ start = end + 1
+ }
+}
+
+func AddCategory(u *uset.UnicodeSet, mask uint32) error {
+ set := uset.New()
+ err := ApplyIntPropertyValue(set, UCharGeneralCategoryMask, int32(mask))
+ if err != nil {
+ return err
+ }
+ u.AddAll(set)
+ return nil
+}
+
+func NewUnicodeSetFomPattern(pattern string, flags uset.USet) (*uset.UnicodeSet, error) {
+ u := uset.New()
+ if err := ApplyPropertyPattern(u, pattern); err != nil {
+ return nil, err
+ }
+ if flags&uset.CaseInsensitive != 0 {
+ u.CloseOver(uset.CaseInsensitive)
+ }
+ return u, nil
+}
+
+func MustNewUnicodeSetFomPattern(pattern string, flags uset.USet) *uset.UnicodeSet {
+ u, err := NewUnicodeSetFomPattern(pattern, flags)
+ if err != nil {
+ panic(err)
+ }
+ return u
+}
diff --git a/go/mysql/icuregex/internal/uprops/uprops.go b/go/mysql/icuregex/internal/uprops/uprops.go
new file mode 100644
index 00000000000..0589938c29c
--- /dev/null
+++ b/go/mysql/icuregex/internal/uprops/uprops.go
@@ -0,0 +1,217 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uprops
+
+import (
+ "vitess.io/vitess/go/mysql/icuregex/internal/bytestrie"
+ "vitess.io/vitess/go/mysql/icuregex/internal/uchar"
+)
+
+const (
+ ixValueMapsOffset = 0
+ ixByteTriesOffset = 1
+ ixNameGroupsOffset = 2
+ ixReserved3Offset = 3
+)
+
+func (prop Property) source() propertySource {
+ if prop < UCharBinaryStart {
+ return srcNone /* undefined */
+ } else if prop < uCharBinaryLimit {
+ bprop := binProps[prop]
+ if bprop.mask != 0 {
+ return srcPropsvec
+ }
+ return bprop.column
+ } else if prop < UCharIntStart {
+ return srcNone /* undefined */
+ } else if prop < uCharIntLimit {
+ iprop := intProps[prop-UCharIntStart]
+ if iprop.mask != 0 {
+ return srcPropsvec
+ }
+ return iprop.column
+ } else if prop < UCharStringStart {
+ switch prop {
+ case UCharGeneralCategoryMask,
+ UCharNumericValue:
+ return srcChar
+
+ default:
+ return srcNone
+ }
+ } else if prop < uCharStringLimit {
+ switch prop {
+ case UCharAge:
+ return srcPropsvec
+
+ case UCharBidiMirroringGlyph:
+ return srcBidi
+
+ case UCharCaseFolding,
+ UCharLowercaseMapping,
+ UCharSimpleCaseFolding,
+ UCharSimpleLowercaseMapping,
+ UcharSimpleTitlecaseMapping,
+ UCharSimpleUppercaseMapping,
+ UCharTitlecaseMapping,
+ UCharUppercaseMapping:
+ return srcCase
+
+ /* UCHAR_ISO_COMMENT, UCHAR_UNICODE_1_NAME (deprecated) */
+ case UCharName:
+ return srcNames
+
+ default:
+ return srcNone
+ }
+ } else {
+ switch prop {
+ case UCharScriptExtensions:
+ return srcPropsvec
+ default:
+ return srcNone /* undefined */
+ }
+ }
+}
+
+func getPropertyEnum(alias string) Property {
+ return Property(getPropertyOrValueEnum(0, alias))
+}
+
+func getPropertyValueEnum(prop Property, alias string) int32 {
+ valueMapIdx := findProperty(prop)
+ if valueMapIdx == 0 {
+ return -1
+ }
+
+ valueMps := valueMaps()
+ valueMapIdx = int32(valueMps[valueMapIdx+1])
+ if valueMapIdx == 0 {
+ return -1
+ }
+ // valueMapIndex is the start of the property's valueMap,
+ // where the first word is the BytesTrie offset.
+ return getPropertyOrValueEnum(int32(valueMps[valueMapIdx]), alias)
+}
+
+func findProperty(prop Property) int32 {
+ var i = int32(1)
+ valueMps := valueMaps()
+ for numRanges := int32(valueMps[0]); numRanges > 0; numRanges-- {
+ start := int32(valueMps[i])
+ limit := int32(valueMps[i+1])
+ i += 2
+ if int32(prop) < start {
+ break
+ }
+ if int32(prop) < limit {
+ return i + (int32(prop)-start)*2
+ }
+ i += (limit - start) * 2
+ }
+ return 0
+}
+
+func getPropertyOrValueEnum(offset int32, alias string) int32 {
+ trie := bytestrie.New(byteTrie()[offset:])
+ if trie.ContainsName(alias) {
+ return trie.GetValue()
+ }
+ return -1
+}
+
+func comparePropertyNames(name1, name2 string) int {
+ next := func(s string) (byte, string) {
+ for len(s) > 0 && (s[0] == 0x2d || s[0] == 0x5f || s[0] == 0x20 || (0x09 <= s[0] && s[0] <= 0x0d)) {
+ s = s[1:]
+ }
+ if len(s) == 0 {
+ return 0, ""
+ }
+ c := s[0]
+ s = s[1:]
+ if 'A' <= c && c <= 'Z' {
+ c += 'a' - 'A'
+ }
+ return c, s
+ }
+
+ var r1, r2 byte
+ for {
+ r1, name1 = next(name1)
+ r2, name2 = next(name2)
+
+ if r1 == 0 && r2 == 0 {
+ return 0
+ }
+
+ /* Compare the lowercased characters */
+ if r1 != r2 {
+ return int(r1) - int(r2)
+ }
+ }
+}
+
+func getIntPropertyValue(c rune, which Property) int32 {
+ if which < UCharIntStart {
+ if UCharBinaryStart <= which && which < uCharBinaryLimit {
+ prop := binProps[which]
+ if prop.contains == nil {
+ return 0
+ }
+ if prop.contains(prop, c, which) {
+ return 1
+ }
+ return 0
+ }
+ } else if which < uCharIntLimit {
+ iprop := intProps[which-UCharIntStart]
+ return iprop.getValue(iprop, c, which)
+ } else if which == UCharGeneralCategoryMask {
+ return int32(uchar.Mask(uchar.CharType(c)))
+ }
+ return 0 // undefined
+}
+
+func mergeScriptCodeOrIndex(scriptX uint32) uint32 {
+ return ((scriptX & scriptHighMask) >> scriptHighShift) |
+ (scriptX & scriptLowMask)
+}
+
+func script(c rune) int32 {
+ if c > 0x10ffff {
+ return -1
+ }
+ scriptX := uchar.GetUnicodeProperties(c, 0) & scriptXMask
+ codeOrIndex := mergeScriptCodeOrIndex(scriptX)
+
+ if scriptX < scriptXWithCommon {
+ return int32(codeOrIndex)
+ } else if scriptX < scriptXWithInherited {
+ return 0
+ } else if scriptX < scriptXWithOther {
+ return 1
+ } else {
+ return int32(uchar.ScriptExtension(codeOrIndex))
+ }
+}
diff --git a/go/mysql/icuregex/internal/uprops/uprops_binary.go b/go/mysql/icuregex/internal/uprops/uprops_binary.go
new file mode 100644
index 00000000000..5d4aaaec1b5
--- /dev/null
+++ b/go/mysql/icuregex/internal/uprops/uprops_binary.go
@@ -0,0 +1,249 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uprops
+
+import (
+ "slices"
+
+ "vitess.io/vitess/go/mysql/icuregex/internal/normalizer"
+ "vitess.io/vitess/go/mysql/icuregex/internal/ubidi"
+ "vitess.io/vitess/go/mysql/icuregex/internal/ucase"
+ "vitess.io/vitess/go/mysql/icuregex/internal/uchar"
+ "vitess.io/vitess/go/mysql/icuregex/internal/uemoji"
+)
+
+type binaryProperty struct {
+ column propertySource
+ mask uint32
+ contains func(prop *binaryProperty, c rune, which Property) bool
+}
+
+func defaultContains(prop *binaryProperty, c rune, _ Property) bool {
+ return (uchar.GetUnicodeProperties(c, int(prop.column)) & prop.mask) != 0
+}
+
+var binProps = [uCharBinaryLimit]*binaryProperty{
+ /*
+ * column and mask values for binary properties from u_getUnicodeProperties().
+ * Must be in order of corresponding UProperty,
+ * and there must be exactly one entry per binary UProperty.
+ *
+ * Properties with mask==0 are handled in code.
+ * For them, column is the UPropertySource value.
+ *
+ * See also https://unicode-org.github.io/icu/userguide/strings/properties.html
+ */
+ {1, uchar.Mask(pAlphabetic), defaultContains},
+ {1, uchar.Mask(pASCIIHexDigit), defaultContains},
+ {srcBidi, 0, isBidiControl},
+ {srcBidi, 0, isMirrored},
+ {1, uchar.Mask(pDash), defaultContains},
+ {1, uchar.Mask(pDefaultIgnorableCodePoint), defaultContains},
+ {1, uchar.Mask(pDeprecated), defaultContains},
+ {1, uchar.Mask(pDiacritic), defaultContains},
+ {1, uchar.Mask(pExtender), defaultContains},
+ {srcNfc, 0, hasFullCompositionExclusion},
+ {1, uchar.Mask(pGraphemeBase), defaultContains},
+ {1, uchar.Mask(pGraphemeExtend), defaultContains},
+ {1, uchar.Mask(pGraphemeLink), defaultContains},
+ {1, uchar.Mask(pHexDigit), defaultContains},
+ {1, uchar.Mask(pHyphen), defaultContains},
+ {1, uchar.Mask(pIDContinue), defaultContains},
+ {1, uchar.Mask(pIDStart), defaultContains},
+ {1, uchar.Mask(pIdeographic), defaultContains},
+ {1, uchar.Mask(pIdsBinaryOperator), defaultContains},
+ {1, uchar.Mask(pIdsTrinaryOperator), defaultContains},
+ {srcBidi, 0, isJoinControl},
+ {1, uchar.Mask(pLogicalOrderException), defaultContains},
+ {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_LOWERCASE
+ {1, uchar.Mask(pMath), defaultContains},
+ {1, uchar.Mask(pNoncharacterCodePoint), defaultContains},
+ {1, uchar.Mask(pQuotationMark), defaultContains},
+ {1, uchar.Mask(pRadical), defaultContains},
+ {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_SOFT_DOTTED
+ {1, uchar.Mask(pTerminalPunctuation), defaultContains},
+ {1, uchar.Mask(pUnifiedIdeograph), defaultContains},
+ {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_UPPERCASE
+ {1, uchar.Mask(pWhiteSpace), defaultContains},
+ {1, uchar.Mask(pXidContinue), defaultContains},
+ {1, uchar.Mask(pXidStart), defaultContains},
+ {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CASE_SENSITIVE
+ {1, uchar.Mask(pSTerm), defaultContains},
+ {1, uchar.Mask(pVariationSelector), defaultContains},
+ {srcNfc, 0, isNormInert}, // UCHAR_NFD_INERT
+ {srcNfkc, 0, isNormInert}, // UCHAR_NFKD_INERT
+ {srcNfc, 0, isNormInert}, // UCHAR_NFC_INERT
+ {srcNfkc, 0, isNormInert}, // UCHAR_NFKC_INERT
+ {srcNfcCanonIter, 0, nil}, // Segment_Starter is currently unsupported
+ {1, uchar.Mask(pPatternSyntax), defaultContains},
+ {1, uchar.Mask(pPatternWhiteSpace), defaultContains},
+ {srcCharAndPropsvec, 0, isPOSIXAlnum},
+ {srcChar, 0, isPOSIXBlank},
+ {srcChar, 0, isPOSIXGraph},
+ {srcChar, 0, isPOSIXPrint},
+ {srcChar, 0, isPOSIXXdigit},
+ {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CASED
+ {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CASE_IGNORABLE
+ {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CHANGES_WHEN_LOWERCASED
+ {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CHANGES_WHEN_UPPERCASED
+ {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CHANGES_WHEN_TITLECASED
+ {srcCaseAndNorm, 0, changesWhenCasefolded},
+ {srcCase, 0, caseBinaryPropertyContains}, // UCHAR_CHANGES_WHEN_CASEMAPPED
+ {srcNfkcCf, 0, nil}, // Changes_When_NFKC_Casefolded is currently unsupported
+ {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI
+ {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI_PRESENTATION
+ {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI_MODIFIER
+ {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI_MODIFIER_BASE
+ {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI_COMPONENT
+ {2, 0, isRegionalIndicator},
+ {1, uchar.Mask(pPrependedConcatenationMark), defaultContains},
+ {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EXTENDED_PICTOGRAPHIC
+ {srcEmoji, 0, hasEmojiProperty}, // UCHAR_BASIC_EMOJI
+ {srcEmoji, 0, hasEmojiProperty}, // UCHAR_EMOJI_KEYCAP_SEQUENCE
+ {srcEmoji, 0, hasEmojiProperty}, // UCHAR_RGI_EMOJI_MODIFIER_SEQUENCE
+ {srcEmoji, 0, hasEmojiProperty}, // UCHAR_RGI_EMOJI_FLAG_SEQUENCE
+ {srcEmoji, 0, hasEmojiProperty}, // UCHAR_RGI_EMOJI_TAG_SEQUENCE
+ {srcEmoji, 0, hasEmojiProperty}, // UCHAR_RGI_EMOJI_ZWJ_SEQUENCE
+ {srcEmoji, 0, hasEmojiProperty}, // UCHAR_RGI_EMOJI
+}
+
+func isBidiControl(_ *binaryProperty, c rune, _ Property) bool {
+ return ubidi.IsBidiControl(c)
+}
+
+func isMirrored(_ *binaryProperty, c rune, _ Property) bool {
+ return ubidi.IsMirrored(c)
+}
+
+func isRegionalIndicator(_ *binaryProperty, c rune, _ Property) bool {
+ return 0x1F1E6 <= c && c <= 0x1F1FF
+}
+
+func changesWhenCasefolded(_ *binaryProperty, c rune, _ Property) bool {
+ if c < 0 {
+ return false
+ }
+
+ nfd := normalizer.Nfc().Decompose(c)
+ if nfd == nil {
+ nfd = []rune{c}
+ }
+ folded := ucase.FoldRunes(nfd)
+ return !slices.Equal(nfd, folded)
+}
+
+func isPOSIXXdigit(_ *binaryProperty, c rune, _ Property) bool {
+ return uchar.IsXDigit(c)
+}
+
+func isPOSIXPrint(_ *binaryProperty, c rune, _ Property) bool {
+ return uchar.IsPOSIXPrint(c)
+}
+
+func isPOSIXGraph(_ *binaryProperty, c rune, _ Property) bool {
+ return uchar.IsGraphPOSIX(c)
+}
+
+func isPOSIXBlank(_ *binaryProperty, c rune, _ Property) bool {
+ return uchar.IsBlank(c)
+}
+
+func isPOSIXAlnum(_ *binaryProperty, c rune, _ Property) bool {
+ return (uchar.GetUnicodeProperties(c, 1)&uchar.Mask(pAlphabetic)) != 0 || uchar.IsDigit(c)
+}
+
+func isJoinControl(_ *binaryProperty, c rune, _ Property) bool {
+ return ubidi.IsJoinControl(c)
+}
+
+func hasFullCompositionExclusion(_ *binaryProperty, c rune, _ Property) bool {
+ impl := normalizer.Nfc()
+ return impl.IsCompNo(c)
+}
+
+func caseBinaryPropertyContains(_ *binaryProperty, c rune, which Property) bool {
+ return HasBinaryPropertyUcase(c, which)
+}
+
+func HasBinaryPropertyUcase(c rune, which Property) bool {
+ /* case mapping properties */
+ switch which {
+ case UCharLowercase:
+ return ucase.Lower == ucase.GetType(c)
+ case UCharUppercase:
+ return ucase.Upper == ucase.GetType(c)
+ case UCharSoftDotted:
+ return ucase.IsSoftDotted(c)
+ case UCharCaseSensitive:
+ return ucase.IsCaseSensitive(c)
+ case UCharCased:
+ return ucase.None != ucase.GetType(c)
+ case UCharCaseIgnorable:
+ return (ucase.GetTypeOrIgnorable(c) >> 2) != 0
+ /*
+ * Note: The following Changes_When_Xyz are defined as testing whether
+ * the NFD form of the input changes when Xyz-case-mapped.
+ * However, this simpler implementation of these properties,
+ * ignoring NFD, passes the tests.
+ * The implementation needs to be changed if the tests start failing.
+ * When that happens, optimizations should be used to work with the
+ * per-single-code point ucase_toFullXyz() functions unless
+ * the NFD form has more than one code point,
+ * and the property starts set needs to be the union of the
+ * start sets for normalization and case mappings.
+ */
+ case UCharChangesWhenLowercased:
+ return ucase.ToFullLower(c) >= 0
+ case UCharChangesWhenUppercased:
+ return ucase.ToFullUpper(c) >= 0
+ case UCharChangesWhenTitlecased:
+ return ucase.ToFullTitle(c) >= 0
+ /* case UCHAR_CHANGES_WHEN_CASEFOLDED: -- in uprops.c */
+ case UCharChangesWhenCasemapped:
+ return ucase.ToFullLower(c) >= 0 || ucase.ToFullUpper(c) >= 0 || ucase.ToFullTitle(c) >= 0
+ default:
+ return false
+ }
+}
+
+func isNormInert(_ *binaryProperty, c rune, which Property) bool {
+ mode := normalizer.Mode(int32(which) - int32(UCharNfdInert) + int32(normalizer.NormNfd))
+ return normalizer.IsInert(c, mode)
+}
+
+func HasBinaryProperty(c rune, which Property) bool {
+ if which < UCharBinaryStart || uCharBinaryLimit <= which {
+ return false
+ }
+ prop := binProps[which]
+ if prop.contains == nil {
+ return false
+ }
+ return prop.contains(prop, c, which)
+}
+
+func hasEmojiProperty(_ *binaryProperty, c rune, which Property) bool {
+ if which < UCharEmoji || UCharRgiEmoji < which {
+ return false
+ }
+ return uemoji.HasBinaryProperty(c, int(which-UCharEmoji))
+}
diff --git a/go/mysql/icuregex/internal/uprops/uprops_int.go b/go/mysql/icuregex/internal/uprops/uprops_int.go
new file mode 100644
index 00000000000..3e62d31184f
--- /dev/null
+++ b/go/mysql/icuregex/internal/uprops/uprops_int.go
@@ -0,0 +1,265 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uprops
+
+import (
+ "vitess.io/vitess/go/mysql/icuregex/internal/normalizer"
+ "vitess.io/vitess/go/mysql/icuregex/internal/ubidi"
+ "vitess.io/vitess/go/mysql/icuregex/internal/uchar"
+ "vitess.io/vitess/go/mysql/icuregex/internal/ulayout"
+)
+
+type intPropertyGetValue func(prop *intProperty, c rune, which Property) int32
+
+type intProperty struct {
+ column propertySource
+ mask uint32
+ shift int32
+ getValue intPropertyGetValue
+}
+
+const (
+ blockMask = 0x0001ff00
+ blockShift = 8
+
+ eaMask = 0x000e0000
+ eaShift = 17
+
+ lbMask = 0x03f00000
+ lbShift = 20
+
+ sbMask = 0x000f8000
+ sbShift = 15
+
+ wbMask = 0x00007c00
+ wbShift = 10
+
+ gcbMask = 0x000003e0
+ gcbShift = 5
+
+ dtMask = 0x0000001f
+)
+
+type numericType int32
+
+/**
+ * Numeric Type constants.
+ *
+ * @see UCHAR_NUMERIC_TYPE
+ * @stable ICU 2.2
+ */
+const (
+ /*
+ * Note: UNumericType constants are parsed by preparseucd.py.
+ * It matches lines like
+ * U_NT_
+ */
+
+ ntNone numericType = iota /*[None]*/
+ ntDecimal /*[de]*/
+ ntDigit /*[di]*/
+ ntNumeric /*[nu]*/
+ /**
+ * One more than the highest normal UNumericType value.
+ * The highest value is available via u_getIntPropertyMaxValue(UCHAR_NUMERIC_TYPE).
+ *
+ * @deprecated ICU 58 The numeric value may change over time, see ICU ticket #12420.
+ */
+ ntCount
+)
+
+/**
+ * Hangul Syllable Type constants.
+ *
+ * @see UCHAR_HANGUL_SYLLABLE_TYPE
+ * @stable ICU 2.6
+ */
+
+type hangunSyllableType int32
+
+const (
+ /*
+ * Note: UHangulSyllableType constants are parsed by preparseucd.py.
+ * It matches lines like
+ * U_HST_
+ */
+
+ hstNotApplicable hangunSyllableType = iota /*[NA]*/
+ hstLeadingJamo /*[L]*/
+ hstVowelJamo /*[V]*/
+ hstTrailingJamo /*[T]*/
+ hstLvSyllable /*[LV]*/
+ hstLvtSyllable /*[LVT]*/
+ /**
+ * One more than the highest normal UHangulSyllableType value.
+ * The highest value is available via u_getIntPropertyMaxValue(UCHAR_HANGUL_SYLLABLE_TYPE).
+ *
+ * @deprecated ICU 58 The numeric value may change over time, see ICU ticket #12420.
+ */
+ hstCount
+)
+
+var intProps = [uCharIntLimit - UCharIntStart]*intProperty{
+ /*
+ * column, mask and shift values for int-value properties from u_getUnicodeProperties().
+ * Must be in order of corresponding UProperty,
+ * and there must be exactly one entry per int UProperty.
+ *
+ * Properties with mask==0 are handled in code.
+ * For them, column is the UPropertySource value.
+ */
+ {srcBidi, 0, 0, getBiDiClass},
+ {0, blockMask, blockShift, defaultGetValue},
+ {srcNfc, 0, 0xff, getCombiningClass},
+ {2, dtMask, 0, defaultGetValue},
+ {0, eaMask, eaShift, defaultGetValue},
+ {srcChar, 0, int32(uchar.CharCategoryCount - 1), getGeneralCategory},
+ {srcBidi, 0, 0, getJoiningGroup},
+ {srcBidi, 0, 0, getJoiningType},
+ {2, lbMask, lbShift, defaultGetValue},
+ {srcChar, 0, int32(ntCount - 1), getNumericType},
+ {srcPropsvec, 0, 0, getScript},
+ {srcPropsvec, 0, int32(hstCount - 1), getHangulSyllableType},
+ // UCHAR_NFD_QUICK_CHECK: max=1=YES -- never "maybe", only "no" or "yes"
+ {srcNfc, 0, int32(normalizer.Yes), getNormQuickCheck},
+ // UCHAR_NFKD_QUICK_CHECK: max=1=YES -- never "maybe", only "no" or "yes"
+ {srcNfkc, 0, int32(normalizer.Yes), getNormQuickCheck},
+ // UCHAR_NFC_QUICK_CHECK: max=2=MAYBE
+ {srcNfc, 0, int32(normalizer.Maybe), getNormQuickCheck},
+ // UCHAR_NFKC_QUICK_CHECK: max=2=MAYBE
+ {srcNfkc, 0, int32(normalizer.Maybe), getNormQuickCheck},
+ {srcNfc, 0, 0xff, getLeadCombiningClass},
+ {srcNfc, 0, 0xff, getTrailCombiningClass},
+ {2, gcbMask, gcbShift, defaultGetValue},
+ {2, sbMask, sbShift, defaultGetValue},
+ {2, wbMask, wbShift, defaultGetValue},
+ {srcBidi, 0, 0, getBiDiPairedBracketType},
+ {srcInpc, 0, 0, getInPC},
+ {srcInsc, 0, 0, getInSC},
+ {srcVo, 0, 0, getVo},
+}
+
+func getVo(_ *intProperty, c rune, _ Property) int32 {
+ return int32(ulayout.VoTrie().Get(c))
+}
+
+func getInSC(_ *intProperty, c rune, _ Property) int32 {
+ return int32(ulayout.InscTrie().Get(c))
+}
+
+func getInPC(_ *intProperty, c rune, _ Property) int32 {
+ return int32(ulayout.InpcTrie().Get(c))
+}
+
+func getBiDiPairedBracketType(_ *intProperty, c rune, _ Property) int32 {
+ return int32(ubidi.PairedBracketType(c))
+}
+
+func getTrailCombiningClass(_ *intProperty, c rune, _ Property) int32 {
+ return int32(normalizer.Nfc().GetFCD16(c) & 0xff)
+}
+
+func getLeadCombiningClass(_ *intProperty, c rune, _ Property) int32 {
+ val := int32(normalizer.Nfc().GetFCD16(c) >> 8)
+ return val
+}
+
+func getNormQuickCheck(_ *intProperty, c rune, which Property) int32 {
+ return int32(normalizer.QuickCheck(c, normalizer.Mode(int32(which)-int32(UCharNfdQuickCheck)+int32(normalizer.NormNfd))))
+}
+
+/*
+ * Map some of the Grapheme Cluster Break values to Hangul Syllable Types.
+ * Hangul_Syllable_Type is fully redundant with a subset of Grapheme_Cluster_Break.
+ */
+var gcbToHst = []hangunSyllableType{
+ hstNotApplicable, /* U_GCB_OTHER */
+ hstNotApplicable, /* U_GCB_CONTROL */
+ hstNotApplicable, /* U_GCB_CR */
+ hstNotApplicable, /* U_GCB_EXTEND */
+ hstLeadingJamo, /* U_GCB_L */
+ hstNotApplicable, /* U_GCB_LF */
+ hstLvSyllable, /* U_GCB_LV */
+ hstLvtSyllable, /* U_GCB_LVT */
+ hstTrailingJamo, /* U_GCB_T */
+ hstVowelJamo, /* U_GCB_V */
+ /*
+ * Omit GCB values beyond what we need for hst.
+ * The code below checks for the array length.
+ */
+}
+
+func getHangulSyllableType(_ *intProperty, c rune, _ Property) int32 {
+ /* see comments on gcbToHst[] above */
+ gcb := (int32(uchar.GetUnicodeProperties(c, 2)) & gcbMask) >> gcbShift
+
+ if gcb < int32(len(gcbToHst)) {
+ return int32(gcbToHst[gcb])
+ }
+ return int32(hstNotApplicable)
+}
+
+func getScript(_ *intProperty, c rune, _ Property) int32 {
+ return script(c)
+}
+
+func getNumericType(_ *intProperty, c rune, _ Property) int32 {
+ ntv := uchar.NumericTypeValue(c)
+ return int32(ntvGetType(ntv))
+}
+
+func getJoiningType(_ *intProperty, c rune, _ Property) int32 {
+ return int32(ubidi.JoinType(c))
+}
+
+func getJoiningGroup(_ *intProperty, c rune, _ Property) int32 {
+ return int32(ubidi.JoinGroup(c))
+}
+
+func getGeneralCategory(_ *intProperty, c rune, _ Property) int32 {
+ return int32(uchar.CharType(c))
+}
+
+func getCombiningClass(_ *intProperty, c rune, _ Property) int32 {
+ return int32(normalizer.Nfc().CombiningClass(c))
+}
+
+func defaultGetValue(prop *intProperty, c rune, _ Property) int32 {
+ return int32(uchar.GetUnicodeProperties(c, int(prop.column))&prop.mask) >> prop.shift
+}
+
+func getBiDiClass(_ *intProperty, c rune, _ Property) int32 {
+ return int32(ubidi.Class(c))
+}
+
+func ntvGetType(ntv uint16) numericType {
+ switch {
+ case ntv == uchar.UPropsNtvNone:
+ return ntNone
+ case ntv < uchar.UPropsNtvDigitStart:
+ return ntDecimal
+ case ntv < uchar.UPropsNtvNumericStart:
+ return ntDigit
+ default:
+ return ntNumeric
+ }
+}
diff --git a/go/mysql/icuregex/internal/uprops/uscript.go b/go/mysql/icuregex/internal/uprops/uscript.go
new file mode 100644
index 00000000000..8a4423849df
--- /dev/null
+++ b/go/mysql/icuregex/internal/uprops/uscript.go
@@ -0,0 +1,505 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uprops
+
+import "vitess.io/vitess/go/mysql/icuregex/internal/uchar"
+
+/**
+ * Constants for ISO 15924 script codes.
+ *
+ * The current set of script code constants supports at least all scripts
+ * that are encoded in the version of Unicode which ICU currently supports.
+ * The names of the constants are usually derived from the
+ * Unicode script property value aliases.
+ * See UAX #24 Unicode Script Property (http://www.unicode.org/reports/tr24/)
+ * and http://www.unicode.org/Public/UCD/latest/ucd/PropertyValueAliases.txt .
+ *
+ * In addition, constants for many ISO 15924 script codes
+ * are included, for use with language tags, CLDR data, and similar.
+ * Some of those codes are not used in the Unicode Character Database (UCD).
+ * For example, there are no characters that have a UCD script property value of
+ * Hans or Hant. All Han ideographs have the Hani script property value in Unicode.
+ *
+ * Private-use codes Qaaa..Qabx are not included, except as used in the UCD or in CLDR.
+ *
+ * Starting with ICU 55, script codes are only added when their scripts
+ * have been or will certainly be encoded in Unicode,
+ * and have been assigned Unicode script property value aliases,
+ * to ensure that their script names are stable and match the names of the constants.
+ * Script codes like Latf and Aran that are not subject to separate encoding
+ * may be added at any time.
+ *
+ * @stable ICU 2.2
+ */
+type code int32
+
+/*
+ * Note: UScriptCode constants and their ISO script code comments
+ * are parsed by preparseucd.py.
+ * It matches lines like
+ * USCRIPT_ = , / * * /
+ */
+
+const (
+ /** @stable ICU 2.2 */
+ invalidCode code = -1
+ /** @stable ICU 2.2 */
+ common code = 0 /* Zyyy */
+ /** @stable ICU 2.2 */
+ inherited code = 1 /* Zinh */ /* "Code for inherited script", for non-spacing combining marks; also Qaai */
+ /** @stable ICU 2.2 */
+ arabic code = 2 /* Arab */
+ /** @stable ICU 2.2 */
+ armenian code = 3 /* Armn */
+ /** @stable ICU 2.2 */
+ bengali code = 4 /* Beng */
+ /** @stable ICU 2.2 */
+ bopomofo code = 5 /* Bopo */
+ /** @stable ICU 2.2 */
+ cherokee code = 6 /* Cher */
+ /** @stable ICU 2.2 */
+ coptic code = 7 /* Copt */
+ /** @stable ICU 2.2 */
+ cyrillic code = 8 /* Cyrl */
+ /** @stable ICU 2.2 */
+ deseret code = 9 /* Dsrt */
+ /** @stable ICU 2.2 */
+ devanagari code = 10 /* Deva */
+ /** @stable ICU 2.2 */
+ ethiopic code = 11 /* Ethi */
+ /** @stable ICU 2.2 */
+ georgian code = 12 /* Geor */
+ /** @stable ICU 2.2 */
+ gothic code = 13 /* Goth */
+ /** @stable ICU 2.2 */
+ greek code = 14 /* Grek */
+ /** @stable ICU 2.2 */
+ gujarati code = 15 /* Gujr */
+ /** @stable ICU 2.2 */
+ gurmukhi code = 16 /* Guru */
+ /** @stable ICU 2.2 */
+ han code = 17 /* Hani */
+ /** @stable ICU 2.2 */
+ hangul code = 18 /* Hang */
+ /** @stable ICU 2.2 */
+ hebrew code = 19 /* Hebr */
+ /** @stable ICU 2.2 */
+ hiragana code = 20 /* Hira */
+ /** @stable ICU 2.2 */
+ kannada code = 21 /* Knda */
+ /** @stable ICU 2.2 */
+ katakana code = 22 /* Kana */
+ /** @stable ICU 2.2 */
+ khmer code = 23 /* Khmr */
+ /** @stable ICU 2.2 */
+ lao code = 24 /* Laoo */
+ /** @stable ICU 2.2 */
+ latin code = 25 /* Latn */
+ /** @stable ICU 2.2 */
+ malayalam code = 26 /* Mlym */
+ /** @stable ICU 2.2 */
+ mongolian code = 27 /* Mong */
+ /** @stable ICU 2.2 */
+ myanmar code = 28 /* Mymr */
+ /** @stable ICU 2.2 */
+ ogham code = 29 /* Ogam */
+ /** @stable ICU 2.2 */
+ oldItalic code = 30 /* Ital */
+ /** @stable ICU 2.2 */
+ oriya code = 31 /* Orya */
+ /** @stable ICU 2.2 */
+ runic code = 32 /* Runr */
+ /** @stable ICU 2.2 */
+ sinhala code = 33 /* Sinh */
+ /** @stable ICU 2.2 */
+ syriac code = 34 /* Syrc */
+ /** @stable ICU 2.2 */
+ tamil code = 35 /* Taml */
+ /** @stable ICU 2.2 */
+ telugu code = 36 /* Telu */
+ /** @stable ICU 2.2 */
+ thaana code = 37 /* Thaa */
+ /** @stable ICU 2.2 */
+ thai code = 38 /* Thai */
+ /** @stable ICU 2.2 */
+ tibetan code = 39 /* Tibt */
+ /** Canadian_Aboriginal script. @stable ICU 2.6 */
+ canadianAboriginal code = 40 /* Cans */
+ /** Canadian_Aboriginal script (alias). @stable ICU 2.2 */
+ ucas code = canadianAboriginal
+ /** @stable ICU 2.2 */
+ yi code = 41 /* Yiii */
+ /* New scripts in Unicode 3.2 */
+ /** @stable ICU 2.2 */
+ tagalog code = 42 /* Tglg */
+ /** @stable ICU 2.2 */
+ hanunoo code = 43 /* Hano */
+ /** @stable ICU 2.2 */
+ buhid code = 44 /* Buhd */
+ /** @stable ICU 2.2 */
+ tagbanwa code = 45 /* Tagb */
+
+ /* New scripts in Unicode 4 */
+ /** @stable ICU 2.6 */
+ braille code = 46 /* Brai */
+ /** @stable ICU 2.6 */
+ cypriot code = 47 /* Cprt */
+ /** @stable ICU 2.6 */
+ limbu code = 48 /* Limb */
+ /** @stable ICU 2.6 */
+ linearB code = 49 /* Linb */
+ /** @stable ICU 2.6 */
+ osmanya code = 50 /* Osma */
+ /** @stable ICU 2.6 */
+ shavian code = 51 /* Shaw */
+ /** @stable ICU 2.6 */
+ taiLe code = 52 /* Tale */
+ /** @stable ICU 2.6 */
+ ugaratic code = 53 /* Ugar */
+
+ /** New script code in Unicode 4.0.1 @stable ICU 3.0 */
+ katakanaOrHiragana = 54 /*Hrkt */
+
+ /* New scripts in Unicode 4.1 */
+ /** @stable ICU 3.4 */
+ buginese code = 55 /* Bugi */
+ /** @stable ICU 3.4 */
+ glagolitic code = 56 /* Glag */
+ /** @stable ICU 3.4 */
+ kharoshthi code = 57 /* Khar */
+ /** @stable ICU 3.4 */
+ sylotiNagri code = 58 /* Sylo */
+ /** @stable ICU 3.4 */
+ newTaiLue code = 59 /* Talu */
+ /** @stable ICU 3.4 */
+ tifinagh code = 60 /* Tfng */
+ /** @stable ICU 3.4 */
+ oldPersian code = 61 /* Xpeo */
+
+ /* New script codes from Unicode and ISO 15924 */
+ /** @stable ICU 3.6 */
+ balinese code = 62 /* Bali */
+ /** @stable ICU 3.6 */
+ batak code = 63 /* Batk */
+ /** @stable ICU 3.6 */
+ blissymbols code = 64 /* Blis */
+ /** @stable ICU 3.6 */
+ brahmi code = 65 /* Brah */
+ /** @stable ICU 3.6 */
+ cham code = 66 /* Cham */
+ /** @stable ICU 3.6 */
+ cirth code = 67 /* Cirt */
+ /** @stable ICU 3.6 */
+ oldChurchSlavonicCyrillic code = 68 /* Cyrs */
+ /** @stable ICU 3.6 */
+ demoticEgyptian code = 69 /* Egyd */
+ /** @stable ICU 3.6 */
+ hieraticEgyptian code = 70 /* Egyh */
+ /** @stable ICU 3.6 */
+ egyptianHieroglyphs code = 71 /* Egyp */
+ /** @stable ICU 3.6 */
+ khutsuri code = 72 /* Geok */
+ /** @stable ICU 3.6 */
+ simplfiedHan code = 73 /* Hans */
+ /** @stable ICU 3.6 */
+ traditionalHan code = 74 /* Hant */
+ /** @stable ICU 3.6 */
+ pahawhHmong code = 75 /* Hmng */
+ /** @stable ICU 3.6 */
+ oldHungarian code = 76 /* Hung */
+ /** @stable ICU 3.6 */
+ harappanIndus code = 77 /* Inds */
+ /** @stable ICU 3.6 */
+ javanese code = 78 /* Java */
+ /** @stable ICU 3.6 */
+ kayahLi code = 79 /* Kali */
+ /** @stable ICU 3.6 */
+ latinFraktur code = 80 /* Latf */
+ /** @stable ICU 3.6 */
+ latinGaelic code = 81 /* Latg */
+ /** @stable ICU 3.6 */
+ lepcha code = 82 /* Lepc */
+ /** @stable ICU 3.6 */
+ linearA code = 83 /* Lina */
+ /** @stable ICU 4.6 */
+ mandaic code = 84 /* Mand */
+ /** @stable ICU 3.6 */
+ mandaean code = mandaic
+ /** @stable ICU 3.6 */
+ mayanHieroglyphs code = 85 /* Maya */
+ /** @stable ICU 4.6 */
+ meroiticHieroglyphs code = 86 /* Mero */
+ /** @stable ICU 3.6 */
+ meroitic code = meroiticHieroglyphs
+ /** @stable ICU 3.6 */
+ nko code = 87 /* Nkoo */
+ /** @stable ICU 3.6 */
+ orkhon code = 88 /* Orkh */
+ /** @stable ICU 3.6 */
+ oldPermic code = 89 /* Perm */
+ /** @stable ICU 3.6 */
+ phagsPa code = 90 /* Phag */
+ /** @stable ICU 3.6 */
+ phoenician code = 91 /* Phnx */
+ /** @stable ICU 52 */
+ miao code = 92 /* Plrd */
+ /** @stable ICU 3.6 */
+ phoneticPollard code = miao
+ /** @stable ICU 3.6 */
+ rongoRongo code = 93 /* Roro */
+ /** @stable ICU 3.6 */
+ sarati code = 94 /* Sara */
+ /** @stable ICU 3.6 */
+ extrangeloSyriac code = 95 /* Syre */
+ /** @stable ICU 3.6 */
+ westernSyriac code = 96 /* Syrj */
+ /** @stable ICU 3.6 */
+ easternSyriac code = 97 /* Syrn */
+ /** @stable ICU 3.6 */
+ tengwar code = 98 /* Teng */
+ /** @stable ICU 3.6 */
+ vai code = 99 /* Vaii */
+ /** @stable ICU 3.6 */
+ visibleSpeech code = 100 /* Visp */
+ /** @stable ICU 3.6 */
+ cuneiform code = 101 /* Xsux */
+ /** @stable ICU 3.6 */
+ unwrittenLanguages code = 102 /* Zxxx */
+ /** @stable ICU 3.6 */
+ unknown code = 103 /* Zzzz */ /* Unknown="Code for uncoded script", for unassigned code points */
+
+ /** @stable ICU 3.8 */
+ carian code = 104 /* Cari */
+ /** @stable ICU 3.8 */
+ japanese code = 105 /* Jpan */
+ /** @stable ICU 3.8 */
+ lanna code = 106 /* Lana */
+ /** @stable ICU 3.8 */
+ lycian code = 107 /* Lyci */
+ /** @stable ICU 3.8 */
+ lydian code = 108 /* Lydi */
+ /** @stable ICU 3.8 */
+ olChiki code = 109 /* Olck */
+ /** @stable ICU 3.8 */
+ rejang code = 110 /* Rjng */
+ /** @stable ICU 3.8 */
+ saurashtra code = 111 /* Saur */
+ /** Sutton SignWriting @stable ICU 3.8 */
+ signWriting code = 112 /* Sgnw */
+ /** @stable ICU 3.8 */
+ sundanese code = 113 /* Sund */
+ /** @stable ICU 3.8 */
+ moon code = 114 /* Moon */
+ /** @stable ICU 3.8 */
+ meiteiMayek code = 115 /* Mtei */
+
+ /** @stable ICU 4.0 */
+ imperialAramaic code = 116 /* Armi */
+ /** @stable ICU 4.0 */
+ avestan code = 117 /* Avst */
+ /** @stable ICU 4.0 */
+ chakma code = 118 /* Cakm */
+ /** @stable ICU 4.0 */
+ korean code = 119 /* Kore */
+ /** @stable ICU 4.0 */
+ kaithi code = 120 /* Kthi */
+ /** @stable ICU 4.0 */
+ manichaean code = 121 /* Mani */
+ /** @stable ICU 4.0 */
+ inscriptionalPahlavi code = 122 /* Phli */
+ /** @stable ICU 4.0 */
+ psalterPahlavi code = 123 /* Phlp */
+ /** @stable ICU 4.0 */
+ bookPahlavi code = 124 /* Phlv */
+ /** @stable ICU 4.0 */
+ inscriptionalParthian code = 125 /* Prti */
+ /** @stable ICU 4.0 */
+ samaritan code = 126 /* Samr */
+ /** @stable ICU 4.0 */
+ taiViet code = 127 /* Tavt */
+ /** @stable ICU 4.0 */
+ mathematicalNotation code = 128 /* Zmth */
+ /** @stable ICU 4.0 */
+ symbols code = 129 /* Zsym */
+
+ /** @stable ICU 4.4 */
+ bamum code = 130 /* Bamu */
+ /** @stable ICU 4.4 */
+ lisu code = 131 /* Lisu */
+ /** @stable ICU 4.4 */
+ nakhiGeba code = 132 /* Nkgb */
+ /** @stable ICU 4.4 */
+ oldSouthArabian code = 133 /* Sarb */
+
+ /** @stable ICU 4.6 */
+ bassaVah code = 134 /* Bass */
+ /** @stable ICU 54 */
+ duployan code = 135 /* Dupl */
+ /** @stable ICU 4.6 */
+ elbasan code = 136 /* Elba */
+ /** @stable ICU 4.6 */
+ grantha code = 137 /* Gran */
+ /** @stable ICU 4.6 */
+ kpelle code = 138 /* Kpel */
+ /** @stable ICU 4.6 */
+ loma code = 139 /* Loma */
+ /** Mende Kikakui @stable ICU 4.6 */
+ mende code = 140 /* Mend */
+ /** @stable ICU 4.6 */
+ meroiticCursive code = 141 /* Merc */
+ /** @stable ICU 4.6 */
+ oldNorthArabian code = 142 /* Narb */
+ /** @stable ICU 4.6 */
+ nabataean code = 143 /* Nbat */
+ /** @stable ICU 4.6 */
+ palmyrene code = 144 /* Palm */
+ /** @stable ICU 54 */
+ khudawadi code = 145 /* Sind */
+ /** @stable ICU 4.6 */
+ sindhi code = khudawadi
+ /** @stable ICU 4.6 */
+ warangCiti code = 146 /* Wara */
+
+ /** @stable ICU 4.8 */
+ afaka code = 147 /* Afak */
+ /** @stable ICU 4.8 */
+ jurchen code = 148 /* Jurc */
+ /** @stable ICU 4.8 */
+ mro code = 149 /* Mroo */
+ /** @stable ICU 4.8 */
+ nushu code = 150 /* Nshu */
+ /** @stable ICU 4.8 */
+ sharada code = 151 /* Shrd */
+ /** @stable ICU 4.8 */
+ soraSompeng code = 152 /* Sora */
+ /** @stable ICU 4.8 */
+ takri code = 153 /* Takr */
+ /** @stable ICU 4.8 */
+ tangut code = 154 /* Tang */
+ /** @stable ICU 4.8 */
+ woleai code = 155 /* Wole */
+
+ /** @stable ICU 49 */
+ anatolianHieroglyphs code = 156 /* Hluw */
+ /** @stable ICU 49 */
+ khojki code = 157 /* Khoj */
+ /** @stable ICU 49 */
+ tirhuta code = 158 /* Tirh */
+
+ /** @stable ICU 52 */
+ caucasianAlbanian code = 159 /* Aghb */
+ /** @stable ICU 52 */
+ mahajani code = 160 /* Mahj */
+
+ /** @stable ICU 54 */
+ ahom code = 161 /* Ahom */
+ /** @stable ICU 54 */
+ hatran code = 162 /* Hatr */
+ /** @stable ICU 54 */
+ modi code = 163 /* Modi */
+ /** @stable ICU 54 */
+ multani code = 164 /* Mult */
+ /** @stable ICU 54 */
+ pauCinHau code = 165 /* Pauc */
+ /** @stable ICU 54 */
+ siddham code = 166 /* Sidd */
+
+ /** @stable ICU 58 */
+ adlam code = 167 /* Adlm */
+ /** @stable ICU 58 */
+ bhaiksuki code = 168 /* Bhks */
+ /** @stable ICU 58 */
+ marchen code = 169 /* Marc */
+ /** @stable ICU 58 */
+ newa code = 170 /* Newa */
+ /** @stable ICU 58 */
+ osage code = 171 /* Osge */
+
+ /** @stable ICU 58 */
+ hanWithBopomofo code = 172 /* Hanb */
+ /** @stable ICU 58 */
+ jamo code = 173 /* Jamo */
+ /** @stable ICU 58 */
+ symbolsEmoji code = 174 /* Zsye */
+
+ /** @stable ICU 60 */
+ masaramGondi code = 175 /* Gonm */
+ /** @stable ICU 60 */
+ soyombo code = 176 /* Soyo */
+ /** @stable ICU 60 */
+ zanabazarSquare code = 177 /* Zanb */
+
+ /** @stable ICU 62 */
+ dogra code = 178 /* Dogr */
+ /** @stable ICU 62 */
+ gunjalaGondi code = 179 /* Gong */
+ /** @stable ICU 62 */
+ makasar code = 180 /* Maka */
+ /** @stable ICU 62 */
+ medefaidrin code = 181 /* Medf */
+ /** @stable ICU 62 */
+ hanifiRohingya code = 182 /* Rohg */
+ /** @stable ICU 62 */
+ sogdian code = 183 /* Sogd */
+ /** @stable ICU 62 */
+ oldSogdian code = 184 /* Sogo */
+
+ /** @stable ICU 64 */
+ elymaic code = 185 /* Elym */
+ /** @stable ICU 64 */
+ nyiakengPuachueHmong code = 186 /* Hmnp */
+ /** @stable ICU 64 */
+ nandinagari code = 187 /* Nand */
+ /** @stable ICU 64 */
+ wancho code = 188 /* Wcho */
+
+ /** @stable ICU 66 */
+ chorasmian code = 189 /* Chrs */
+ /** @stable ICU 66 */
+ divesAkuru code = 190 /* Diak */
+ /** @stable ICU 66 */
+ khitanSmallScript code = 191 /* Kits */
+ /** @stable ICU 66 */
+ yezedi code = 192 /* Yezi */
+)
+
+// uscriptHasScript reports whether code point c has script sc, either as its
+// primary script or through its Script_Extensions. Mirrors ICU's
+// uscript_hasScript().
+func uscriptHasScript(c rune, sc code) bool {
+	scriptX := uchar.GetUnicodeProperties(c, 0) & scriptXMask
+	codeOrIndex := mergeScriptCodeOrIndex(scriptX)
+	if scriptX < scriptXWithCommon {
+		// No Script_Extensions: the stored value is the script code itself.
+		return sc == code(codeOrIndex)
+	}
+
+	// Otherwise the stored value indexes the Script_Extensions table.
+	scx := uchar.ScriptExtensions(codeOrIndex)
+	if scriptX >= scriptXWithOther {
+		// Per ICU's encoding, the first entry is the script code and the
+		// second entry points at the actual extensions list.
+		scx = uchar.ScriptExtensions(uint32(scx[1]))
+	}
+	sc32 := uint32(sc)
+	if sc32 > 0x7fff {
+		/* Guard against bogus input that would make us go past the Script_Extensions terminator. */
+		return false
+	}
+	// Linear scan; the list is sorted and terminated by an entry whose
+	// high bit (0x8000) is set, hence the &0x7fff mask on the final compare.
+	for sc32 > uint32(scx[0]) {
+		scx = scx[1:]
+	}
+	return sc32 == uint32(scx[0]&0x7fff)
+}
diff --git a/go/mysql/icuregex/internal/uset/close.go b/go/mysql/icuregex/internal/uset/close.go
new file mode 100644
index 00000000000..bd3f9f0f7e3
--- /dev/null
+++ b/go/mysql/icuregex/internal/uset/close.go
@@ -0,0 +1,96 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uset
+
+import "vitess.io/vitess/go/mysql/icuregex/internal/ucase"
+
+type USet uint32
+
+const (
+ /**
+ * Ignore white space within patterns unless quoted or escaped.
+ * @stable ICU 2.4
+ */
+ IgnoreSpace USet = 1
+
+ /**
+ * Enable case insensitive matching. E.g., "[ab]" with this flag
+ * will match 'a', 'A', 'b', and 'B'. "[^ab]" with this flag will
+ * match all except 'a', 'A', 'b', and 'B'. This performs a full
+ * closure over case mappings, e.g. U+017F for s.
+ *
+ * The resulting set is a superset of the input for the code points but
+ * not for the strings.
+ * It performs a case mapping closure of the code points and adds
+ * full case folding strings for the code points, and reduces strings of
+ * the original set to their full case folding equivalents.
+ *
+ * This is designed for case-insensitive matches, for example
+ * in regular expressions. The full code point case closure allows checking of
+ * an input character directly against the closure set.
+ * Strings are matched by comparing the case-folded form from the closure
+ * set with an incremental case folding of the string in question.
+ *
+ * The closure set will also contain single code points if the original
+ * set contained case-equivalent strings (like U+00DF for "ss" or "Ss" etc.).
+ * This is not necessary (that is, redundant) for the above matching method
+ * but results in the same closure sets regardless of whether the original
+ * set contained the code point or a string.
+ *
+ * @stable ICU 2.4
+ */
+ CaseInsensitive USet = 2
+
+ /**
+ * Enable case insensitive matching. E.g., "[ab]" with this flag
+ * will match 'a', 'A', 'b', and 'B'. "[^ab]" with this flag will
+ * match all except 'a', 'A', 'b', and 'B'. This adds the lower-,
+ * title-, and uppercase mappings as well as the case folding
+ * of each existing element in the set.
+ * @stable ICU 3.2
+ */
+ AddCaseMappings USet = 4
+)
+
+// CloseOver closes this set over the given attribute. Only CaseInsensitive
+// is implemented: every code point already in the set is expanded with its
+// full case closure via ucase.AddCaseClosure (so e.g. [a] also matches 'A').
+// AddCaseMappings is explicitly unsupported and panics.
+func (u *UnicodeSet) CloseOver(attribute USet) {
+	if attribute&AddCaseMappings != 0 {
+		panic("USET_ADD_CASE_MAPPINGS is unsupported")
+	}
+	if (attribute & CaseInsensitive) == 0 {
+		// Nothing to close over (IgnoreSpace only affects pattern parsing).
+		return
+	}
+
+	// Accumulate into a copy so that closure points added along the way do
+	// not feed back into the iteration over the original ranges.
+	foldSet := u.Clone()
+	n := u.RangeCount()
+
+	for i := 0; i < n; i++ {
+		start := u.RangeStart(i)
+		end := u.RangeEnd(i)
+
+		// full case closure
+		for cp := start; cp <= end; cp++ {
+			ucase.AddCaseClosure(cp, foldSet)
+		}
+	}
+
+	*u = *foldSet
+}
diff --git a/go/mysql/icuregex/internal/uset/frozen.go b/go/mysql/icuregex/internal/uset/frozen.go
new file mode 100644
index 00000000000..2703a4f6975
--- /dev/null
+++ b/go/mysql/icuregex/internal/uset/frozen.go
@@ -0,0 +1,339 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uset
+
+type frozen struct {
+ // One byte 0 or 1 per Latin-1 character.
+ latin1Contains [0x100]byte
+
+ // true if contains(U+FFFD)
+ containsFFFD bool
+
+ /*
+ * One bit per code point from U+0000..U+07FF.
+ * The bits are organized vertically; consecutive code points
+ * correspond to the same bit positions in consecutive table words.
+ * With code point parts
+ * lead=c{10..6}
+ * trail=c{5..0}
+ * it is set.contains(c)==(table7FF[trail] bit lead)
+ *
+ * Bits for 0..7F (non-shortest forms) are set to the result of contains(FFFD)
+ * for faster validity checking at runtime.
+ */
+ table7FF [64]uint32
+
+ /*
+ * One bit per 64 BMP code points.
+ * The bits are organized vertically; consecutive 64-code point blocks
+ * correspond to the same bit position in consecutive table words.
+ * With code point parts
+ * lead=c{15..12}
+ * t1=c{11..6}
+ * test bits (lead+16) and lead in bmpBlockBits[t1].
+ * If the upper bit is 0, then the lower bit indicates if contains(c)
+ * for all code points in the 64-block.
+ * If the upper bit is 1, then the block is mixed and set.contains(c)
+ * must be called.
+ *
+ * Bits for 0..7FF (non-shortest forms) and D800..DFFF are set to
+ * the result of contains(FFFD) for faster validity checking at runtime.
+ */
+ bmpBlockBits [64]uint32
+
+ /*
+ * Inversion list indexes for restricted binary searches in
+ * findCodePoint(), from
+ * findCodePoint(U+0800, U+1000, U+2000, .., U+F000, U+10000).
+ * U+0800 is the first 3-byte-UTF-8 code point. Code points below U+0800 are
+ * always looked up in the bit tables.
+ * The last pair of indexes is for finding supplementary code points.
+ */
+ list4kStarts [18]int32
+}
+
+// freeze precomputes the lookup acceleration structures for an inversion
+// list: 4k-block search bounds (list4kStarts), the contains(U+FFFD) flag,
+// the Latin-1 byte table, and the 2-byte/BMP bit tables (via initBits),
+// then applies the illegal-byte overrides.
+func freeze(list []rune) *frozen {
+	f := &frozen{}
+
+	listEnd := int32(len(list) - 1)
+
+	// Cache findCodePoint() results for U+0800 and each 4k block start so
+	// later binary searches can be restricted to a narrow index window.
+	f.list4kStarts[0] = f.findCodePoint(list, 0x800, 0, listEnd)
+	for i := 1; i <= 0x10; i++ {
+		f.list4kStarts[i] = f.findCodePoint(list, rune(i)<<12, f.list4kStarts[i-1], listEnd)
+	}
+	f.list4kStarts[0x11] = listEnd
+	f.containsFFFD = f.containsSlow(list, 0xfffd, f.list4kStarts[0xf], f.list4kStarts[0x10])
+
+	f.initBits(list)
+	f.overrideIllegal()
+
+	return f
+}
+
+// containsSlow reports membership of c via binary search in the inversion
+// list restricted to indexes [lo, hi]; an odd result index means "in set".
+func (f *frozen) containsSlow(list []rune, c rune, lo, hi int32) bool {
+	return (f.findCodePoint(list, c, lo, hi) & 1) != 0
+}
+
+func (f *frozen) findCodePoint(list []rune, c rune, lo, hi int32) int32 {
+ /* Examples:
+ findCodePoint(c)
+ set list[] c=0 1 3 4 7 8
+ === ============== ===========
+ [] [110000] 0 0 0 0 0 0
+ [\u0000-\u0003] [0, 4, 110000] 1 1 1 2 2 2
+ [\u0004-\u0007] [4, 8, 110000] 0 0 0 1 1 2
+ [:Any:] [0, 110000] 1 1 1 1 1 1
+ */
+
+ // Return the smallest i such that c < list[i]. Assume
+ // list[len - 1] == HIGH and that c is legal (0..HIGH-1).
+ if c < list[lo] {
+ return lo
+ }
+ // High runner test. c is often after the last range, so an
+ // initial check for this condition pays off.
+ if lo >= hi || c >= list[hi-1] {
+ return hi
+ }
+ // invariant: c >= list[lo]
+ // invariant: c < list[hi]
+ for {
+ i := (lo + hi) >> 1
+ if i == lo {
+ break // Found!
+ } else if c < list[i] {
+ hi = i
+ } else {
+ lo = i
+ }
+ }
+ return hi
+}
+
+// set32x64bits marks the code points [start, limit) in a 32x64-bit table
+// (table7FF or bmpBlockBits). Bits are organized vertically: bit `lead` of
+// table[trail] corresponds to value lead*64+trail.
+func (f *frozen) set32x64bits(table *[64]uint32, start, limit int32) {
+	lead := start >> 6    // Named for UTF-8 2-byte lead byte with upper 5 bits.
+	trail := start & 0x3f // Named for UTF-8 2-byte trail byte with lower 6 bits.
+
+	// Set one bit indicating an all-one block.
+	bits := uint32(1) << lead
+	if (start + 1) == limit { // Single-character shortcut.
+		table[trail] |= bits
+		return
+	}
+
+	limitLead := limit >> 6
+	limitTrail := limit & 0x3f
+
+	if lead == limitLead {
+		// Partial vertical bit column.
+		for trail < limitTrail {
+			table[trail] |= bits
+			trail++
+		}
+	} else {
+		// Partial vertical bit column,
+		// followed by a bit rectangle,
+		// followed by another partial vertical bit column.
+		if trail > 0 {
+			for {
+				table[trail] |= bits
+				trail++
+				if trail >= 64 {
+					break
+				}
+			}
+			lead++
+		}
+		if lead < limitLead {
+			bits = ^((uint32(1) << lead) - 1)
+			if limitLead < 0x20 {
+				bits &= (uint32(1) << limitLead) - 1
+			}
+			for trail = 0; trail < 64; trail++ {
+				table[trail] |= bits
+			}
+		}
+		// NOTE(review): the remainder of this function was reconstructed from
+		// ICU4C BMPSet::set32x64Bits — the original hunk text was corrupted
+		// during extraction. Verify against upstream.
+		// limit<=0x800. If limit==0x800 then limitLead=32 and limitTrail=0.
+		// In that case bits=1<<limitLead would be undefined in C; in Go the
+		// shift is well-defined, and the value is unused anyway because the
+		// loop below does not execute when limitTrail==0.
+		bits = uint32(1) << (limitLead & 0x1f)
+		for trail = 0; trail < limitTrail; trail++ {
+			table[trail] |= bits
+		}
+	}
+}
+
+// overrideIllegal sets/clears bits for lead bytes of non-shortest UTF-8
+// forms (C0/C1, E0 low range) and surrogates (ED) so that runtime validity
+// checks resolve to the result of contains(U+FFFD).
+// NOTE(review): reconstructed from ICU4C BMPSet::overrideIllegal — this
+// method is called from freeze() but its text was corrupted out of the
+// hunk during extraction. Verify against upstream.
+func (f *frozen) overrideIllegal() {
+	var bits, mask uint32
+
+	if f.containsFFFD {
+		bits = 3 // Lead bytes 0xC0 and 0xC1.
+		for i := 0; i < 64; i++ {
+			f.table7FF[i] |= bits
+		}
+
+		bits = 1 // Lead byte 0xE0.
+		for i := 0; i < 32; i++ { // First half of 4k block.
+			f.bmpBlockBits[i] |= bits
+		}
+
+		mask = ^(uint32(0x10001) << 0xd) // Lead byte 0xED.
+		bits = uint32(1) << 0xd
+		for i := 32; i < 64; i++ { // Second half of 4k block.
+			f.bmpBlockBits[i] = (f.bmpBlockBits[i] & mask) | bits
+		}
+	} else {
+		mask = ^(uint32(0x10001) << 0xd) // Lead byte 0xED.
+		for i := 32; i < 64; i++ { // Second half of 4k block.
+			f.bmpBlockBits[i] &= mask
+		}
+	}
+}
+
+// initBits fills latin1Contains[], table7FF[] and bmpBlockBits[] from the
+// inversion list. (The header and the Latin-1 loop opening were
+// reconstructed from ICU4C BMPSet::initBits — corrupted hunk; verify.)
+func (f *frozen) initBits(list []rune) {
+	var start, limit rune
+	var listIndex int
+
+	// Set latin1Contains[].
+	for {
+		start = list[listIndex]
+		listIndex++
+		if listIndex < len(list) {
+			limit = list[listIndex]
+			listIndex++
+		} else {
+			limit = 0x110000
+		}
+		if start >= 0x100 {
+			break
+		}
+		for {
+			f.latin1Contains[start] = 1
+			start++
+			if start >= limit || start >= 0x100 {
+				break
+			}
+		}
+		if limit > 0x100 {
+			break
+		}
+	}
+
+	// Find the first range overlapping with (or after) 80..FF again,
+	// to include them in table7FF as well.
+	listIndex = 0
+	for {
+		start = list[listIndex]
+		listIndex++
+		if listIndex < len(list) {
+			limit = list[listIndex]
+			listIndex++
+		} else {
+			limit = 0x110000
+		}
+		if limit > 0x80 {
+			if start < 0x80 {
+				start = 0x80
+			}
+			break
+		}
+	}
+
+	// Set table7FF[].
+	for start < 0x800 {
+		var end rune
+		if limit <= 0x800 {
+			end = limit
+		} else {
+			end = 0x800
+		}
+		f.set32x64bits(&f.table7FF, start, end)
+		if limit > 0x800 {
+			start = 0x800
+			break
+		}
+
+		start = list[listIndex]
+		listIndex++
+		if listIndex < len(list) {
+			limit = list[listIndex]
+			listIndex++
+		} else {
+			limit = 0x110000
+		}
+	}
+
+	// Set bmpBlockBits[].
+	minStart := rune(0x800)
+	for start < 0x10000 {
+		if limit > 0x10000 {
+			limit = 0x10000
+		}
+
+		if start < minStart {
+			start = minStart
+		}
+		if start < limit { // Else: Another range entirely in a known mixed-value block.
+			if (start & 0x3f) != 0 {
+				// Mixed-value block of 64 code points.
+				start >>= 6
+				f.bmpBlockBits[start&0x3f] |= 0x10001 << (start >> 6)
+				start = (start + 1) << 6 // Round up to the next block boundary.
+				minStart = start         // Ignore further ranges in this block.
+			}
+			if start < limit {
+				if start < (limit &^ 0x3f) {
+					// Multiple all-ones blocks of 64 code points each.
+					f.set32x64bits(&f.bmpBlockBits, start>>6, limit>>6)
+				}
+
+				if (limit & 0x3f) != 0 {
+					// Mixed-value block of 64 code points.
+					limit >>= 6
+					f.bmpBlockBits[limit&0x3f] |= 0x10001 << (limit >> 6)
+					limit = (limit + 1) << 6 // Round up to the next block boundary.
+					minStart = limit         // Ignore further ranges in this block.
+				}
+			}
+		}
+
+		if limit == 0x10000 {
+			break
+		}
+
+		start = list[listIndex]
+		listIndex++
+		if listIndex < len(list) {
+			limit = list[listIndex]
+			listIndex++
+		} else {
+			limit = 0x110000
+		}
+	}
+}
diff --git a/go/mysql/icuregex/internal/uset/pattern.go b/go/mysql/icuregex/internal/uset/pattern.go
new file mode 100644
index 00000000000..20b44da9c6d
--- /dev/null
+++ b/go/mysql/icuregex/internal/uset/pattern.go
@@ -0,0 +1,107 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uset
+
+import (
+ "strings"
+
+ "vitess.io/vitess/go/mysql/icuregex/internal/pattern"
+)
+
+// String renders the set as a bracketed pattern (e.g. "[a-z]") with
+// unprintable characters hex-escaped; implements fmt.Stringer.
+func (u *UnicodeSet) String() string {
+	var buf strings.Builder
+	u.ToPattern(&buf, true)
+	return buf.String()
+}
+
+// ToPattern appends the pattern representation of this set to w. When
+// escapeUnprintable is true, unprintable code points are written as
+// \uXXXX / \UXXXXXXXX escapes. If the set spans both MinValue and MaxValue
+// with at least two ranges, the complemented form "[^...]" is emitted
+// because it is more economical.
+func (u *UnicodeSet) ToPattern(w *strings.Builder, escapeUnprintable bool) {
+	w.WriteByte('[')
+
+	// // Check against the predefined categories. We implicitly build
+	// // up ALL category sets the first time toPattern() is called.
+	// for (int8_t cat=0; cat<Unicode::GENERAL_TYPES_COUNT; ++cat) {
+	//     if (*this == getStaticSet(cat)) { ... }
+	// }
+	//
+	// NOTE(review): the tail of the commented-out C++ block above, the
+	// `count` definition and the `if count > 1 …` header below were
+	// reconstructed — this hunk's text was corrupted during extraction
+	// (`count` was used but never defined). Verify against upstream.
+
+	count := u.RangeCount()
+
+	// If the set contains at least 2 intervals and includes both
+	// MIN_VALUE and MAX_VALUE, then the inverse representation will
+	// be more economical.
+	if count > 1 && u.RangeStart(0) == MinValue && u.RangeEnd(count-1) == MaxValue {
+
+		// Emit the inverse
+		w.WriteByte('^')
+
+		// The complement's ranges are the gaps between this set's ranges.
+		for i := 1; i < count; i++ {
+			start := u.RangeEnd(i-1) + 1
+			end := u.RangeStart(i) - 1
+			u.appendToPattern(w, start, escapeUnprintable)
+			if start != end {
+				if (start + 1) != end {
+					w.WriteByte('-')
+				}
+				u.appendToPattern(w, end, escapeUnprintable)
+			}
+		}
+	} else {
+		// Default; emit the ranges as pairs
+		for i := 0; i < count; i++ {
+			start := u.RangeStart(i)
+			end := u.RangeEnd(i)
+			u.appendToPattern(w, start, escapeUnprintable)
+			if start != end {
+				if (start + 1) != end {
+					w.WriteByte('-')
+				}
+				u.appendToPattern(w, end, escapeUnprintable)
+			}
+		}
+	}
+
+	w.WriteByte(']')
+}
+
+// appendToPattern writes a single code point c to w in pattern form:
+// hex-escaped if unprintable (when escapeUnprintable is set), backslash-
+// escaped if it is a pattern metacharacter or whitespace, literal otherwise.
+func (u *UnicodeSet) appendToPattern(w *strings.Builder, c rune, escapeUnprintable bool) {
+	if escapeUnprintable && pattern.IsUnprintable(c) {
+		// Use hex escape notation (\uxxxx or \Uxxxxxxxx) for anything
+		// unprintable
+		pattern.EscapeUnprintable(w, c)
+		return
+	}
+
+	// Okay to let ':' pass through
+	switch c {
+	case '[', ']', '-', '^', '&', '\\', '{', '}', ':', '$':
+		w.WriteByte('\\')
+	default:
+		// Escape whitespace
+		if pattern.IsWhitespace(c) {
+			w.WriteByte('\\')
+		}
+	}
+	w.WriteRune(c)
+}
diff --git a/go/mysql/icuregex/internal/uset/unicode_set.go b/go/mysql/icuregex/internal/uset/unicode_set.go
new file mode 100644
index 00000000000..e2f7bd8cbca
--- /dev/null
+++ b/go/mysql/icuregex/internal/uset/unicode_set.go
@@ -0,0 +1,686 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uset
+
+import (
+ "fmt"
+ "slices"
+)
+
+// HIGH_VALUE > all valid values. 110000 for codepoints
+const unicodeSetHigh = 0x0110000
+
+// LOW <= all valid values. ZERO for codepoints
+const unicodeSetLow = 0x000000
+
+const (
+ /**
+ * Minimum value that can be stored in a UnicodeSet.
+ * @stable ICU 2.4
+ */
+ MinValue = 0
+
+ /**
+ * Maximum value that can be stored in a UnicodeSet.
+ * @stable ICU 2.4
+ */
+ MaxValue = 0x10ffff
+)
+
+type UnicodeSet struct {
+ list []rune
+ buffer []rune
+ frozen *frozen
+}
+
+// New returns an empty, mutable UnicodeSet. The inversion list always ends
+// with the unicodeSetHigh sentinel; the initial capacity of 25 mirrors
+// ICU's default stack capacity for UnicodeSet.
+func New() *UnicodeSet {
+	buf := make([]rune, 1, 25)
+	buf[0] = unicodeSetHigh
+	return &UnicodeSet{list: buf}
+}
+
+// FromRunes wraps an existing inversion list ([start1, limit1, …, HIGH]) in
+// a UnicodeSet without copying; callers must not mutate list afterwards.
+func FromRunes(list []rune) *UnicodeSet {
+	return &UnicodeSet{list: list}
+}
+
+// ensureBufferCapacity grows the scratch buffer used by the merge routines
+// to at least c runes, reusing the existing allocation when large enough.
+func (u *UnicodeSet) ensureBufferCapacity(c int) {
+	if cap(u.buffer) < c {
+		u.buffer = make([]rune, c)
+		return
+	}
+	u.buffer = u.buffer[:cap(u.buffer)]
+}
+
+func (u *UnicodeSet) addbuffer(other []rune, polarity int8) {
+ if u.frozen != nil {
+ panic("UnicodeSet is frozen")
+ }
+ u.ensureBufferCapacity(len(u.list) + len(other))
+
+ i := 1
+ j := 1
+ k := 0
+
+ a := u.list[0]
+ b := other[0]
+
+ for {
+ switch polarity {
+ case 0:
+ if a < b {
+ if k > 0 && a <= u.buffer[k-1] {
+ k--
+ a = max(u.list[i], u.buffer[k])
+ } else {
+ u.buffer[k] = a
+ k++
+ a = u.list[i]
+ }
+ i++
+ polarity ^= 1
+ } else if b < a {
+ if k > 0 && b <= u.buffer[k-1] {
+ k--
+ b = max(other[j], u.buffer[k])
+ } else {
+ u.buffer[k] = b
+ k++
+ b = other[j]
+ }
+ j++
+ polarity ^= 2
+ } else {
+ if a == unicodeSetHigh {
+ goto loopEnd
+ }
+ if k > 0 && a <= u.buffer[k-1] {
+ k--
+ a = max(u.list[i], u.buffer[k])
+ } else {
+ u.buffer[k] = a
+ k++
+ a = u.list[i]
+ }
+ i++
+ polarity ^= 1
+ b = other[j]
+ j++
+ polarity ^= 2
+ }
+ case 3:
+ if b <= a {
+ if a == unicodeSetHigh {
+ goto loopEnd
+ }
+ u.buffer[k] = a
+ k++
+ } else {
+ if b == unicodeSetHigh {
+ goto loopEnd
+ }
+ u.buffer[k] = b
+ k++
+ }
+ a = u.list[i]
+ i++
+ polarity ^= 1
+ b = other[j]
+ j++
+ polarity ^= 2
+ case 1:
+ if a < b {
+ u.buffer[k] = a
+ k++
+ a = u.list[i]
+ i++
+ polarity ^= 1
+ } else if b < a {
+ b = other[j]
+ j++
+ polarity ^= 2
+ } else {
+ if a == unicodeSetHigh {
+ goto loopEnd
+ }
+ a = u.list[i]
+ i++
+ polarity ^= 1
+ b = other[j]
+ j++
+ polarity ^= 2
+ }
+ case 2:
+ if b < a {
+ u.buffer[k] = b
+ k++
+ b = other[j]
+ j++
+ polarity ^= 2
+ } else if a < b {
+ a = u.list[i]
+ i++
+ polarity ^= 1
+ } else {
+ if a == unicodeSetHigh {
+ goto loopEnd
+ }
+ a = u.list[i]
+ i++
+ polarity ^= 1
+ b = other[j]
+ j++
+ polarity ^= 2
+ }
+ }
+ }
+
+loopEnd:
+ u.buffer[k] = unicodeSetHigh
+ k++
+
+ u.list, u.buffer = u.buffer[:k], u.list
+}
+
+func pinCodePoint(c *rune) rune {
+ if *c < unicodeSetLow {
+ *c = unicodeSetLow
+ } else if *c > (unicodeSetHigh - 1) {
+ *c = unicodeSetHigh - 1
+ }
+ return *c
+}
+
+func (u *UnicodeSet) AddRune(c rune) {
+ if u.frozen != nil {
+ panic("UnicodeSet is frozen")
+ }
+
+ // find smallest i such that c < list[i]
+ // if odd, then it is IN the set
+ // if even, then it is OUT of the set
+ i := u.findCodePoint(pinCodePoint(&c))
+
+ // already in set?
+ if (i & 1) != 0 {
+ return
+ }
+
+ // HIGH is 0x110000
+ // assert(list[len-1] == HIGH);
+
+ // empty = [HIGH]
+ // [start_0, limit_0, start_1, limit_1, HIGH]
+
+ // [..., start_k-1, limit_k-1, start_k, limit_k, ..., HIGH]
+ // ^
+ // list[i]
+
+ // i == 0 means c is before the first range
+ if c == u.list[i]-1 {
+ // c is before start of next range
+ u.list[i] = c
+ // if we touched the HIGH mark, then add a new one
+ if c == (unicodeSetHigh - 1) {
+ u.list = append(u.list, unicodeSetHigh)
+ }
+ if i > 0 && c == u.list[i-1] {
+ // collapse adjacent ranges
+
+ // [..., start_k-1, c, c, limit_k, ..., HIGH]
+ // ^
+ // list[i]
+ for k := i - 1; k < len(u.list)-2; k++ {
+ u.list[k] = u.list[k+2]
+ }
+ u.list = u.list[:len(u.list)-2]
+ }
+ } else if i > 0 && c == u.list[i-1] {
+ // c is after end of prior range
+ u.list[i-1]++
+ // no need to check for collapse here
+ } else {
+ // At this point we know the new char is not adjacent to
+ // any existing ranges, and it is not 10FFFF.
+
+ // [..., start_k-1, limit_k-1, start_k, limit_k, ..., HIGH]
+ // ^
+ // list[i]
+
+ // [..., start_k-1, limit_k-1, c, c+1, start_k, limit_k, ..., HIGH]
+ // ^
+ // list[i]
+ u.list = slices.Insert(u.list, i, c, c+1)
+ }
+}
+
+// AddRuneRange adds the inclusive range [start, end] to the set. Endpoints
+// are pinned into the valid code point space; start == end adds the single
+// rune, and start > end (after pinning) adds nothing.
+// NOTE(review): unlike AddRune, the fast append path below mutates u.list
+// without checking u.frozen (only the slow addbuffer path panics) — confirm
+// this asymmetry is intended.
+func (u *UnicodeSet) AddRuneRange(start, end rune) {
+	if pinCodePoint(&start) < pinCodePoint(&end) {
+		limit := end + 1
+		// Fast path for adding a new range after the last one.
+		// Odd list length: [..., lastStart, lastLimit, HIGH]
+		if (len(u.list) & 1) != 0 {
+			// If the list is empty, set lastLimit low enough to not be adjacent to 0.
+			var lastLimit rune
+			if len(u.list) == 1 {
+				lastLimit = -2
+			} else {
+				lastLimit = u.list[len(u.list)-2]
+			}
+			if lastLimit <= start {
+				if lastLimit == start {
+					// Extend the last range.
+					u.list[len(u.list)-2] = limit
+					if limit == unicodeSetHigh {
+						u.list = u.list[:len(u.list)-1]
+					}
+				} else {
+					u.list[len(u.list)-1] = start
+					if limit < unicodeSetHigh {
+						u.list = append(u.list, limit)
+						u.list = append(u.list, unicodeSetHigh)
+					} else { // limit == UNICODESET_HIGH
+						u.list = append(u.list, unicodeSetHigh)
+					}
+				}
+				return
+			}
+		}
+		// This is slow. Could be much faster using findCodePoint(start)
+		// and modifying the list, dealing with adjacent & overlapping ranges.
+		addRange := [3]rune{start, limit, unicodeSetHigh}
+		u.addbuffer(addRange[:], 0)
+	} else if start == end {
+		u.AddRune(start)
+	}
+}
+
+// AddAll adds every code point of u2 to u (set union).
+func (u *UnicodeSet) AddAll(u2 *UnicodeSet) {
+	if len(u2.list) > 0 {
+		u.addbuffer(u2.list, 0)
+	}
+}
+
+// Complement inverts the set in place: code points in the set leave it and
+// vice versa. Implemented by toggling the leading LOW (0) boundary of the
+// inversion list. Panics if the set is frozen.
+func (u *UnicodeSet) Complement() {
+	if u.frozen != nil {
+		panic("UnicodeSet is frozen")
+	}
+	if u.list[0] == unicodeSetLow {
+		copy(u.list, u.list[1:])
+		u.list = u.list[:len(u.list)-1]
+	} else {
+		u.list = slices.Insert(u.list, 0, unicodeSetLow)
+	}
+}
+
+// RemoveRuneRange removes the inclusive range [start, end] from the set.
+// NOTE(review): with `<` (not `<=`) a single-rune range (start == end) is a
+// no-op here, whereas ICU's remove(start, end) uses `<=` — confirm intended.
+func (u *UnicodeSet) RemoveRuneRange(start, end rune) {
+	if pinCodePoint(&start) < pinCodePoint(&end) {
+		r := [3]rune{start, end + 1, unicodeSetHigh}
+		u.retain(r[:], 2)
+	}
+}
+
+// RemoveAll removes every code point of c from u (set difference; retain
+// with polarity 2).
+func (u *UnicodeSet) RemoveAll(c *UnicodeSet) {
+	u.retain(c.list, 2)
+}
+
+// RetainAll keeps only code points also present in c (set intersection;
+// retain with polarity 0).
+func (u *UnicodeSet) RetainAll(c *UnicodeSet) {
+	u.retain(c.list, 0)
+}
+
+func (u *UnicodeSet) retain(other []rune, polarity int8) {
+ if u.frozen != nil {
+ panic("UnicodeSet is frozen")
+ }
+
+ u.ensureBufferCapacity(len(u.list) + len(other))
+
+ i := 1
+ j := 1
+ k := 0
+
+ a := u.list[0]
+ b := other[0]
+
+ // change from xor is that we have to check overlapping pairs
+ // polarity bit 1 means a is second, bit 2 means b is.
+ for {
+ switch polarity {
+ case 0: // both first; drop the smaller
+ if a < b { // drop a
+ a = u.list[i]
+ i++
+ polarity ^= 1
+ } else if b < a { // drop b
+ b = other[j]
+ j++
+ polarity ^= 2
+ } else { // a == b, take one, drop other
+ if a == unicodeSetHigh {
+ goto loop_end
+ }
+ u.buffer[k] = a
+ k++
+ a = u.list[i]
+ i++
+ polarity ^= 1
+ b = other[j]
+ j++
+ polarity ^= 2
+ }
+ case 3: // both second; take lower if unequal
+ if a < b { // take a
+ u.buffer[k] = a
+ k++
+ a = u.list[i]
+ i++
+ polarity ^= 1
+ } else if b < a { // take b
+ u.buffer[k] = b
+ k++
+ b = other[j]
+ j++
+ polarity ^= 2
+ } else { // a == b, take one, drop other
+ if a == unicodeSetHigh {
+ goto loop_end
+ }
+ u.buffer[k] = a
+ k++
+ a = u.list[i]
+ i++
+ polarity ^= 1
+ b = other[j]
+ j++
+ polarity ^= 2
+ }
+ case 1: // a second, b first;
+ if a < b { // NO OVERLAP, drop a
+ a = u.list[i]
+ i++
+ polarity ^= 1
+ } else if b < a { // OVERLAP, take b
+ u.buffer[k] = b
+ k++
+ b = other[j]
+ j++
+ polarity ^= 2
+ } else { // a == b, drop both!
+ if a == unicodeSetHigh {
+ goto loop_end
+ }
+ a = u.list[i]
+ i++
+ polarity ^= 1
+ b = other[j]
+ j++
+ polarity ^= 2
+ }
+ case 2: // a first, b second; if a < b, overlap
+ if b < a { // no overlap, drop b
+ b = other[j]
+ j++
+ polarity ^= 2
+ } else if a < b { // OVERLAP, take a
+ u.buffer[k] = a
+ k++
+ a = u.list[i]
+ i++
+ polarity ^= 1
+ } else { // a == b, drop both!
+ if a == unicodeSetHigh {
+ goto loop_end
+ }
+ a = u.list[i]
+ i++
+ polarity ^= 1
+ b = other[j]
+ j++
+ polarity ^= 2
+ }
+ }
+ }
+
+loop_end:
+ u.buffer[k] = unicodeSetHigh // terminate
+ k++
+ u.list, u.buffer = u.buffer[:k], u.list
+}
+
+// Clear empties the set, keeping only the HIGH sentinel. Panics if frozen.
+func (u *UnicodeSet) Clear() {
+	if u.frozen != nil {
+		panic("UnicodeSet is frozen")
+	}
+	u.list = u.list[:1]
+	u.list[0] = unicodeSetHigh
+}
+
+// Len returns the number of code points in the set (not the range count).
+func (u *UnicodeSet) Len() (n int) {
+	count := u.RangeCount()
+	for i := 0; i < count; i++ {
+		n += int(u.RangeEnd(i)) - int(u.RangeStart(i)) + 1
+	}
+	return
+}
+
+// RangeCount returns the number of contiguous code point ranges in the set.
+func (u *UnicodeSet) RangeCount() int {
+	return len(u.list) / 2
+}
+
+// RangeStart returns the first code point of range idx.
+func (u *UnicodeSet) RangeStart(idx int) rune {
+	return u.list[idx*2]
+}
+
+// RangeEnd returns the last (inclusive) code point of range idx; the list
+// stores exclusive limits, hence the -1.
+func (u *UnicodeSet) RangeEnd(idx int) rune {
+	return u.list[idx*2+1] - 1
+}
+
+func (u *UnicodeSet) RuneAt(idx int) rune {
+ if idx >= 0 {
+ // len2 is the largest even integer <= len, that is, it is len
+ // for even values and len-1 for odd values. With odd values
+ // the last entry is UNICODESET_HIGH.
+ len2 := len(u.list)
+ if (len2 & 0x1) != 0 {
+ len2--
+ }
+
+ var i int
+ for i < len2 {
+ start := u.list[i]
+ count := int(u.list[i+1] - start)
+ i += 2
+ if idx < count {
+ return start + rune(idx)
+ }
+ idx -= count
+ }
+ }
+ return -1
+}
+
+// ContainsRune reports whether c is in the set. Frozen sets use the
+// precomputed acceleration tables (Latin-1 byte table, 2-byte and BMP bit
+// tables, and binary searches restricted by list4kStarts); unfrozen sets
+// fall back to a binary search of the inversion list.
+func (u *UnicodeSet) ContainsRune(c rune) bool {
+	if f := u.frozen; f != nil {
+		if c < 0 {
+			return false
+		} else if c <= 0xff {
+			return f.latin1Contains[c] != 0
+		} else if c <= 0x7ff {
+			return (f.table7FF[c&0x3f] & (uint32(1) << (c >> 6))) != 0
+		} else if c < 0xd800 || (c >= 0xe000 && c <= 0xffff) {
+			lead := c >> 12
+			twoBits := (f.bmpBlockBits[(c>>6)&0x3f] >> lead) & 0x10001
+			if twoBits <= 1 {
+				// All 64 code points with the same bits 15..6
+				// are either in the set or not.
+				return twoBits != 0
+			}
+			// Look up the code point in its 4k block of code points.
+			return f.containsSlow(u.list, c, f.list4kStarts[lead], f.list4kStarts[lead+1])
+		} else if c <= 0x10ffff {
+			// surrogate or supplementary code point
+			return f.containsSlow(u.list, c, f.list4kStarts[0xd], f.list4kStarts[0x11])
+		}
+		// Out-of-range code points get FALSE, consistent with long-standing
+		// behavior of UnicodeSet::contains(c).
+		return false
+	}
+
+	if c >= unicodeSetHigh {
+		return false
+	}
+	i := u.findCodePoint(c)
+	return (i & 1) != 0
+}
+
+// ContainsRuneRange reports whether the set contains EVERY code point in the
+// inclusive range [from, to]: from must land inside a set range (odd index)
+// and to must stay below that range's exclusive limit.
+func (u *UnicodeSet) ContainsRuneRange(from, to rune) bool {
+	i := u.findCodePoint(from)
+	return (i&1) != 0 && to < u.list[i]
+}
+
+func (u *UnicodeSet) findCodePoint(c rune) int {
+ /* Examples:
+ findCodePoint(c)
+ set list[] c=0 1 3 4 7 8
+ === ============== ===========
+ [] [110000] 0 0 0 0 0 0
+ [\u0000-\u0003] [0, 4, 110000] 1 1 1 2 2 2
+ [\u0004-\u0007] [4, 8, 110000] 0 0 0 1 1 2
+ [:Any:] [0, 110000] 1 1 1 1 1 1
+ */
+
+ // Return the smallest i such that c < list[i]. Assume
+ // list[len - 1] == HIGH and that c is legal (0..HIGH-1).
+ if c < u.list[0] {
+ return 0
+ }
+
+ // High runner test. c is often after the last range, so an
+ // initial check for this condition pays off.
+ lo := 0
+ hi := len(u.list) - 1
+ if lo >= hi || c >= u.list[hi-1] {
+ return hi
+ }
+
+ // invariant: c >= list[lo]
+ // invariant: c < list[hi]
+ for {
+ i := (lo + hi) >> 1
+ if i == lo {
+ break // Found!
+ } else if c < u.list[i] {
+ hi = i
+ } else {
+ lo = i
+ }
+ }
+ return hi
+}
+
+func (u *UnicodeSet) AddString(chars string) {
+ for _, c := range chars {
+ u.AddRune(c)
+ }
+}
+
+type Filter func(ch rune) bool
+
+func (u *UnicodeSet) ApplyFilter(inclusions *UnicodeSet, filter Filter) {
+ // Logically, walk through all Unicode characters, noting the start
+ // and end of each range for which filter.contain(c) is
+ // true. Add each range to a set.
+ //
+ // To improve performance, use an inclusions set which
+ // encodes information about character ranges that are known
+ // to have identical properties.
+ // inclusions contains the first characters of
+ // same-value ranges for the given property.
+
+ u.Clear()
+
+ startHasProperty := rune(-1)
+ limitRange := inclusions.RangeCount()
+
+ for j := 0; j < limitRange; j++ {
+ // get current range
+ start := inclusions.RangeStart(j)
+ end := inclusions.RangeEnd(j)
+
+ // for all the code points in the range, process
+ for ch := start; ch <= end; ch++ {
+ // only add to this UnicodeSet on inflection points --
+ // where the hasProperty value changes to false
+ if filter(ch) {
+ if startHasProperty < 0 {
+ startHasProperty = ch
+ }
+ } else if startHasProperty >= 0 {
+ u.AddRuneRange(startHasProperty, ch-1)
+ startHasProperty = -1
+ }
+ }
+ }
+ if startHasProperty >= 0 {
+ u.AddRuneRange(startHasProperty, 0x10FFFF)
+ }
+}
+
+// Clone returns a mutable deep copy of the set's inversion list. The clone
+// is never frozen (even if u is) and shares no storage with u.
+func (u *UnicodeSet) Clone() *UnicodeSet {
+	return &UnicodeSet{list: slices.Clone(u.list)}
+}
+
+// IsEmpty reports whether the set contains no code points (the list holds
+// only the HIGH sentinel).
+func (u *UnicodeSet) IsEmpty() bool {
+	return len(u.list) == 1
+}
+
+// CopyFrom replaces u's contents with a copy of set's inversion list.
+// Panics if u is frozen.
+func (u *UnicodeSet) CopyFrom(set *UnicodeSet) {
+	if u.frozen != nil {
+		panic("UnicodeSet is frozen")
+	}
+	u.list = slices.Clone(set.list)
+}
+
+// Equals reports whether both sets contain exactly the same code points
+// (inversion lists compare element-wise equal).
+func (u *UnicodeSet) Equals(other *UnicodeSet) bool {
+	return slices.Equal(u.list, other.list)
+}
+
+// Freeze builds the lookup acceleration tables for fast ContainsRune and
+// marks the set immutable (mutators panic afterwards). Returns u.
+func (u *UnicodeSet) Freeze() *UnicodeSet {
+	u.frozen = freeze(u.list)
+	return u
+}
+
+// FreezeCheck_ is a test/debug helper: it verifies that the frozen lookup
+// tables agree with the raw inversion list for every code point.
+func (u *UnicodeSet) FreezeCheck_() error {
+	if u == nil {
+		return nil
+	}
+	if u.frozen == nil {
+		return fmt.Errorf("UnicodeSet is not frozen")
+	}
+	for r := rune(0); r <= 0x10ffff; r++ {
+		want := (u.findCodePoint(r) & 1) != 0
+		got := u.ContainsRune(r)
+		if want != got {
+			return fmt.Errorf("rune '%c' (U+%04X) did not freeze", r, r)
+		}
+	}
+	return nil
+}
diff --git a/go/mysql/icuregex/internal/uset/unicode_set_test.go b/go/mysql/icuregex/internal/uset/unicode_set_test.go
new file mode 100644
index 00000000000..908abd8889d
--- /dev/null
+++ b/go/mysql/icuregex/internal/uset/unicode_set_test.go
@@ -0,0 +1,43 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package uset
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// TestSimpleBelong exercises basic membership queries: a set built
+// from a string of regex metacharacters, its complement, and a set
+// built rune by rune.
+func TestSimpleBelong(t *testing.T) {
+ ss1 := New()
+ ss1.AddString("*?+[(){}^$|\\.")
+ ss2 := New()
+ ss2.AddString("*?+[(){}^$|\\.")
+ ss2.Complement()
+ ss3 := New()
+ ss3.AddRune('*')
+ ss3.AddRune('?')
+
+ // '(' is in ss1, hence not in its complement ss2.
+ assert.True(t, ss1.ContainsRune('('))
+ assert.False(t, ss2.ContainsRune('('))
+ assert.True(t, ss3.ContainsRune('*'))
+}
diff --git a/go/mysql/icuregex/internal/utf16/helpers.go b/go/mysql/icuregex/internal/utf16/helpers.go
new file mode 100644
index 00000000000..bdf53ae731c
--- /dev/null
+++ b/go/mysql/icuregex/internal/utf16/helpers.go
@@ -0,0 +1,65 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utf16
+
+import "unicode/utf16"
+
+// IsLead reports whether c is a UTF-16 lead (high) surrogate,
+// U+D800..U+DBFF.
+func IsLead(c rune) bool {
+ return 0xd800 <= c && c <= 0xdbff
+}
+
+// IsTrail reports whether c is a UTF-16 trail (low) surrogate,
+// U+DC00..U+DFFF.
+func IsTrail(c rune) bool {
+ return 0xdc00 <= c && c <= 0xdfff
+}
+
+// IsSurrogate reports whether c is any UTF-16 surrogate code point,
+// U+D800..U+DFFF (lead or trail).
+// Derived from ICU's U_IS_SURROGATE (stable since ICU 2.4).
+func IsSurrogate(c rune) bool {
+ return 0xd800 <= c && c <= 0xdfff
+}
+
+// IsSurrogateLead assumes c is already known to be a surrogate
+// (IsSurrogate(c)) and reports whether it is a lead surrogate.
+// Lead surrogates (U+D800..U+DBFF) have bit 10 clear; trail
+// surrogates (U+DC00..U+DFFF) have it set.
+// Derived from ICU's U16_IS_SURROGATE_LEAD (stable since ICU 2.4).
+func IsSurrogateLead(c rune) bool {
+ return (c>>10)&1 == 0
+}
+
+// DecodeRune combines a lead and a trail surrogate into the
+// supplementary code point they encode, delegating to the standard
+// library. Per unicode/utf16.DecodeRune, it yields U+FFFD when (a, b)
+// is not a valid surrogate pair.
+func DecodeRune(a, b rune) rune {
+ return utf16.DecodeRune(a, b)
+}
+
+// NextUnsafe returns the next code point in s along with the remaining
+// slice. "Unsafe" as in ICU's U16_NEXT_UNSAFE: it assumes s is
+// non-empty and well-formed. When s starts with a lead surrogate the
+// following unit is consumed unconditionally, so a lone lead surrogate
+// at the end of s panics with an index-out-of-range.
+func NextUnsafe(s []uint16) (rune, []uint16) {
+ c := rune(s[0])
+ if !IsLead(c) {
+ // Single-unit code point (or unpaired trail surrogate passed through).
+ return c, s[1:]
+ }
+ // Surrogate pair: combine the two units and consume both.
+ return DecodeRune(c, rune(s[1])), s[2:]
+}
diff --git a/go/mysql/icuregex/internal/utrie/ucptrie.go b/go/mysql/icuregex/internal/utrie/ucptrie.go
new file mode 100644
index 00000000000..74e4eb9b2fa
--- /dev/null
+++ b/go/mysql/icuregex/internal/utrie/ucptrie.go
@@ -0,0 +1,708 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utrie
+
+import (
+ "errors"
+ "fmt"
+
+ "vitess.io/vitess/go/mysql/icuregex/internal/udata"
+)
+
+// UcpTrie is an immutable Unicode code point trie (ICU's UCPTrie): a
+// compact read-only map from each code point U+0000..U+10FFFF (plus an
+// error slot for out-of-range input) to an unsigned integer value.
+// Instances are deserialized from binary data by UcpTrieFromBytes.
+type UcpTrie struct {
+ // index holds the multi-stage index tables.
+ index []uint16
+ // Exactly one of data8/data16/data32 is populated, selected by
+ // valueWidth (see UcpTrieFromBytes).
+ data8 []uint8
+ data16 []uint16
+ data32 []uint32
+
+ indexLength, dataLength int32
+ /** Start of the last range which ends at U+10FFFF. @internal */
+ highStart rune
+ // highStart rounded up and shifted right by 12, precomputed for
+ // fast comparisons.
+ shifted12HighStart uint16
+
+ typ ucpTrieType // fast vs. small index layout
+ valueWidth ucpTrieValueWidth // 8, 16 or 32 bits per data value
+
+ /**
+ * Internal index-3 null block offset.
+ * Set to an impossibly high value (e.g., 0xffff) if there is no dedicated index-3 null block.
+ * @internal
+ */
+ index3NullOffset uint16
+ /**
+ * Internal data null block offset, not shifted.
+ * Set to an impossibly high value (e.g., 0xfffff) if there is no dedicated data null block.
+ * @internal
+ */
+ dataNullOffset int32
+
+ // nullValue is the value of the data null block, i.e. the value
+ // returned for code points not explicitly set.
+ nullValue uint32
+}
+
+/**
+ * Selectors for the type of a UCPTrie.
+ * Different trade-offs for size vs. speed.
+ *
+ * @see umutablecptrie_buildImmutable
+ * @see ucptrie_openFromBinary
+ * @see ucptrie_getType
+ * @stable ICU 63
+ */
+type ucpTrieType int8
+
+const (
+ /**
+ * For ucptrie_openFromBinary() to accept any type.
+ * ucptrie_getType() will return the actual type.
+ * @stable ICU 63
+ */
+ typeAny ucpTrieType = iota - 1
+ /**
+ * Fast/simple/larger BMP data structure. Use functions and "fast" macros.
+ * @stable ICU 63
+ */
+ typeFast
+ /**
+ * Small/slower BMP data structure. Use functions and "small" macros.
+ * @stable ICU 63
+ */
+ typeSmall
+)
+
+/**
+ * Selectors for the number of bits in a UCPTrie data value.
+ *
+ * @see umutablecptrie_buildImmutable
+ * @see ucptrie_openFromBinary
+ * @see ucptrie_getValueWidth
+ * @stable ICU 63
+ */
+type ucpTrieValueWidth int8
+
+const (
+ /**
+ * For ucptrie_openFromBinary() to accept any data value width.
+ * ucptrie_getValueWidth() will return the actual data value width.
+ * @stable ICU 63
+ */
+ valueBitsAny ucpTrieValueWidth = iota - 1
+ /**
+ * The trie stores 16 bits per data value.
+ * It returns them as unsigned values 0..0xffff=65535.
+ * @stable ICU 63
+ */
+ valueBits16
+ /**
+ * The trie stores 32 bits per data value.
+ * @stable ICU 63
+ */
+ valueBits32
+ /**
+ * The trie stores 8 bits per data value.
+ * It returns them as unsigned values 0..0xff=255.
+ * @stable ICU 63
+ */
+ valueBits8
+)
+
+const ucpTrieSig = 0x54726933
+const ucpTrieOESig = 0x33697254
+
+/**
+ * Constants for use with UCPTrieHeader.options.
+ * @internal
+ */
+const (
+ optionsDataLengthMask = 0xf000
+ optionsDataNullOffsetMask = 0xf00
+ optionsReservedMask = 0x38
+ optionsValueBitsMask = 7
+)
+
+const (
+ /** @internal */
+ fastShift = 6
+
+ /** Number of entries in a data block for code points below the fast limit. 64=0x40 @internal */
+ fastDataBlockLength = 1 << fastShift
+
+ /** Mask for getting the lower bits for the in-fast-data-block offset. @internal */
+ fastDataMask = fastDataBlockLength - 1
+
+ /** @internal */
+ smallMax = 0xfff
+
+ /**
+ * Offset from dataLength (to be subtracted) for fetching the
+ * value returned for out-of-range code points and ill-formed UTF-8/16.
+ * @internal
+ */
+ errorValueNegDataOffset = 1
+ /**
+ * Offset from dataLength (to be subtracted) for fetching the
+ * value returned for code points highStart..U+10FFFF.
+ * @internal
+ */
+ highValueNegDataOffset = 2
+)
+
+// Internal constants.
+const (
+ /** The length of the BMP index table. 1024=0x400 */
+ bmpIndexLength = 0x10000 >> fastShift
+
+ smallLimit = 0x1000
+ smallIndexLength = smallLimit >> fastShift
+
+ /** Shift size for getting the index-3 table offset. */
+ ucpShift3 = 4
+
+ /** Shift size for getting the index-2 table offset. */
+ ucpShift2 = 5 + ucpShift3
+
+ /** Shift size for getting the index-1 table offset. */
+ ucpShift1 = 5 + ucpShift2
+
+ /**
+ * Difference between two shift sizes,
+ * for getting an index-2 offset from an index-3 offset. 5=9-4
+ */
+ ucpShift2Min3 = ucpShift2 - ucpShift3
+
+ /**
+ * Difference between two shift sizes,
+ * for getting an index-1 offset from an index-2 offset. 5=14-9
+ */
+ ucpShift1Min2 = ucpShift1 - ucpShift2
+
+ /**
+ * Number of index-1 entries for the BMP. (4)
+ * This part of the index-1 table is omitted from the serialized form.
+ */
+ ucpOmittedBmpIndex1Length = 0x10000 >> ucpShift1
+
+ /** Number of entries in an index-2 block. 32=0x20 */
+ ucpIndex2BlockLength = 1 << ucpShift1Min2
+
+ /** Mask for getting the lower bits for the in-index-2-block offset. */
+ ucpIndex2Mask = ucpIndex2BlockLength - 1
+
+ /** Number of code points per index-2 table entry. 512=0x200 */
+ ucpCpPerIndex2Entry = 1 << ucpShift2
+
+ /** Number of entries in an index-3 block. 32=0x20 */
+ ucpIndex3BlockLength = 1 << ucpShift2Min3
+
+ /** Mask for getting the lower bits for the in-index-3-block offset. */
+ ucpIndex3Mask = ucpIndex3BlockLength - 1
+
+ /** Number of entries in a small data block. 16=0x10 */
+ ucpSmallDataBlockLength = 1 << ucpShift3
+
+ /** Mask for getting the lower bits for the in-small-data-block offset. */
+ ucpSmallDataMask = ucpSmallDataBlockLength - 1
+)
+
+// UcpTrieFromBytes deserializes a UCPTrie from its little-endian
+// binary form ("Tri3" signature). It returns an error for big-endian
+// data, a bad signature, or invalid option bits; on success the
+// returned trie is ready for lookups via Get/GetRange.
+func UcpTrieFromBytes(bytes *udata.Bytes) (*UcpTrie, error) {
+ type ucpHeader struct {
+ /** "Tri3" in big-endian US-ASCII (0x54726933) */
+ signature uint32
+
+ /**
+ * Options bit field:
+ * Bits 15..12: Data length bits 19..16.
+ * Bits 11..8: Data null block offset bits 19..16.
+ * Bits 7..6: UCPTrieType
+ * Bits 5..3: Reserved (0).
+ * Bits 2..0: UCPTrieValueWidth
+ */
+ options uint16
+
+ /** Total length of the index tables. */
+ indexLength uint16
+
+ /** Data length bits 15..0. */
+ dataLength uint16
+
+ /** Index-3 null block offset, 0x7fff or 0xffff if none. */
+ index3NullOffset uint16
+
+ /** Data null block offset bits 15..0, 0xfffff if none. */
+ dataNullOffset uint16
+
+ /**
+ * First code point of the single-value range ending with U+10ffff,
+ * rounded up and then shifted right by UCPTRIE_SHIFT_2.
+ */
+ shiftedHighStart uint16
+ }
+
+ var header ucpHeader
+ header.signature = bytes.Uint32()
+
+ switch header.signature {
+ case ucpTrieSig:
+ case ucpTrieOESig:
+ return nil, errors.New("unsupported: BigEndian encoding")
+ default:
+ return nil, fmt.Errorf("invalid signature for UcpTrie: 0x%08x", header.signature)
+ }
+
+ header.options = bytes.Uint16()
+ header.indexLength = bytes.Uint16()
+ header.dataLength = bytes.Uint16()
+ header.index3NullOffset = bytes.Uint16()
+ header.dataNullOffset = bytes.Uint16()
+ header.shiftedHighStart = bytes.Uint16()
+
+ typeInt := (header.options >> 6) & 3
+ valueWidthInt := header.options & optionsValueBitsMask
+ if typeInt > uint16(typeSmall) || valueWidthInt > uint16(valueBits8) ||
+ (header.options&optionsReservedMask) != 0 {
+ return nil, errors.New("invalid options for serialized UcpTrie")
+ }
+ actualType := ucpTrieType(typeInt)
+ actualValueWidth := ucpTrieValueWidth(valueWidthInt)
+
+ // BUGFIX: the high bits of dataLength and dataNullOffset live in
+ // options bits 15..12 / 11..8 and must land at bits 19..16 of the
+ // reconstructed value. Shifting while still in uint16 (as before)
+ // discarded those bits; widen to int32 *before* shifting.
+ trie := &UcpTrie{
+ indexLength: int32(header.indexLength),
+ dataLength: (int32(header.options&optionsDataLengthMask) << 4) | int32(header.dataLength),
+ index3NullOffset: header.index3NullOffset,
+ dataNullOffset: (int32(header.options&optionsDataNullOffsetMask) << 8) | int32(header.dataNullOffset),
+ highStart: rune(header.shiftedHighStart) << ucpShift2,
+ typ: actualType,
+ valueWidth: actualValueWidth,
+ }
+ // A dataNullOffset past the data array means "no dedicated null
+ // block"; fall back to the stored high value.
+ nullValueOffset := trie.dataNullOffset
+ if nullValueOffset >= trie.dataLength {
+ nullValueOffset = trie.dataLength - highValueNegDataOffset
+ }
+
+ trie.shifted12HighStart = uint16((trie.highStart + 0xfff) >> 12)
+ trie.index = bytes.Uint16Slice(int32(header.indexLength))
+ // Read the data array in the width declared by the options, and
+ // capture the null value from it.
+ switch actualValueWidth {
+ case valueBits16:
+ trie.data16 = bytes.Uint16Slice(trie.dataLength)
+ trie.nullValue = uint32(trie.data16[nullValueOffset])
+ case valueBits32:
+ trie.data32 = bytes.Uint32Slice(trie.dataLength)
+ trie.nullValue = trie.data32[nullValueOffset]
+ case valueBits8:
+ trie.data8 = bytes.Uint8Slice(trie.dataLength)
+ trie.nullValue = uint32(trie.data8[nullValueOffset])
+ }
+
+ return trie, nil
+}
+
+// Get returns the trie value for code point c. Out-of-range input
+// (c > U+10FFFF) yields the trie's error value via cpIndex.
+// NOTE(review): a negative c takes the linear-ASCII branch and would
+// index out of range — callers presumably guarantee c >= 0; confirm.
+func (t *UcpTrie) Get(c rune) uint32 {
+ var dataIndex int32
+ if c <= 0x7f {
+ // linear ASCII
+ dataIndex = c
+ } else {
+ // The fast path covers the whole BMP for "fast" tries but only
+ // up to smallMax (0xfff) for "small" tries.
+ var fastMax rune
+ if t.typ == typeFast {
+ fastMax = 0xffff
+ } else {
+ fastMax = smallMax
+ }
+ dataIndex = t.cpIndex(fastMax, c)
+ }
+ return t.getValue(dataIndex)
+}
+
+// getValue reads the data-array entry at dataIndex, widening the
+// stored 8/16/32-bit value to uint32 according to the trie's value
+// width.
+func (t *UcpTrie) getValue(dataIndex int32) uint32 {
+ switch t.valueWidth {
+ case valueBits8:
+ return uint32(t.data8[dataIndex])
+ case valueBits16:
+ return uint32(t.data16[dataIndex])
+ case valueBits32:
+ return t.data32[dataIndex]
+ }
+ // Unreachable for a trie built by UcpTrieFromBytes, which always
+ // sets a concrete value width.
+ return 0xffffffff
+}
+
+/** Internal trie getter for a code point below the fast limit. Returns the data index. @internal */
+// The index entry for c's 64-code-point block gives the block start;
+// the low 6 bits of c select the entry within the block.
+func (t *UcpTrie) fastIndex(c rune) int32 {
+ return int32(t.index[c>>fastShift]) + (c & fastDataMask)
+}
+
+/** Internal trie getter for a code point at or above the fast limit. Returns the data index. @internal */
+// Code points at or above highStart all share the stored high value,
+// located highValueNegDataOffset entries before the end of the data.
+func (t *UcpTrie) smallIndex(c rune) int32 {
+ if c >= t.highStart {
+ return t.dataLength - highValueNegDataOffset
+ }
+ return t.internalSmallIndex(c)
+}
+
+// internalSmallIndex resolves c (below highStart, above the fast
+// limit) through the three-stage index: index-1 -> index-2 -> index-3
+// -> data block. Index-3 entries are either plain 16-bit offsets or,
+// when bit 15 of the index-2 entry is set, 18-bit offsets packed as
+// groups of 9 uint16s per 8 entries (one uint16 of high bits followed
+// by 8 uint16s of low bits).
+func (t *UcpTrie) internalSmallIndex(c rune) int32 {
+ i1 := c >> ucpShift1
+ // The serialized index-1 table omits the BMP part for "fast" tries,
+ // so the two layouts bias i1 differently.
+ if t.typ == typeFast {
+ i1 += bmpIndexLength - ucpOmittedBmpIndex1Length
+ } else {
+ i1 += smallIndexLength
+ }
+ i3Block := int32(t.index[int32(t.index[i1])+((c>>ucpShift2)&ucpIndex2Mask)])
+ i3 := (c >> ucpShift3) & ucpIndex3Mask
+ var dataBlock int32
+ if (i3Block & 0x8000) == 0 {
+ // 16-bit indexes
+ dataBlock = int32(t.index[i3Block+i3])
+ } else {
+ // 18-bit indexes stored in groups of 9 entries per 8 indexes.
+ i3Block = (i3Block & 0x7fff) + (i3 & ^7) + (i3 >> 3)
+ i3 &= 7
+ // First uint16 of the group holds the two high bits for each of
+ // the 8 entries; the remaining 8 hold the low 16 bits.
+ dataBlock = int32(t.index[i3Block]) << (2 + (2 * i3)) & 0x30000
+ i3Block++
+ dataBlock |= int32(t.index[i3Block+i3])
+ }
+ return dataBlock + (c & ucpSmallDataMask)
+}
+
+/**
+ * Internal trie getter for a code point, with checking that c is in U+0000..10FFFF.
+ * Returns the data index.
+ * @internal
+ */
+// Values above U+10FFFF map to the error slot stored
+// errorValueNegDataOffset entries before the end of the data array.
+// NOTE(review): negative c would pass the first comparison and panic
+// in fastIndex; callers presumably guarantee c >= 0.
+func (t *UcpTrie) cpIndex(fastMax, c rune) int32 {
+ if c <= fastMax {
+ return t.fastIndex(c)
+ }
+ if c <= 0x10ffff {
+ return t.smallIndex(c)
+ }
+ return t.dataLength - errorValueNegDataOffset
+}
+
+/**
+ * Selectors for how ucpmap_getRange() etc. should report value ranges overlapping with surrogates.
+ * Most users should use UCPMAP_RANGE_NORMAL.
+ *
+ * @see ucpmap_getRange
+ * @see ucptrie_getRange
+ * @see umutablecptrie_getRange
+ * @stable ICU 63
+ */
+type UcpMapRangeOption int8
+
+const (
+ /**
+ * ucpmap_getRange() enumerates all same-value ranges as stored in the map.
+ * Most users should use this option.
+ * @stable ICU 63
+ */
+ UcpMapRangeNormal UcpMapRangeOption = iota
+ /**
+ * ucpmap_getRange() enumerates all same-value ranges as stored in the map,
+ * except that lead surrogates (U+D800..U+DBFF) are treated as having the
+ * surrogateValue, which is passed to getRange() as a separate parameter.
+ * The surrogateValue is not transformed via filter().
+ * See U_IS_LEAD(c).
+ *
+ * Most users should use UCPMAP_RANGE_NORMAL instead.
+ *
+ * This option is useful for maps that map surrogate code *units* to
+ * special values optimized for UTF-16 string processing
+ * or for special error behavior for unpaired surrogates,
+ * but those values are not to be associated with the lead surrogate code *points*.
+ * @stable ICU 63
+ */
+ UcpMapRangeFixedLeadSurrogates
+ /**
+ * ucpmap_getRange() enumerates all same-value ranges as stored in the map,
+ * except that all surrogates (U+D800..U+DFFF) are treated as having the
+ * surrogateValue, which is passed to getRange() as a separate parameter.
+ * The surrogateValue is not transformed via filter().
+ * See U_IS_SURROGATE(c).
+ *
+ * Most users should use UCPMAP_RANGE_NORMAL instead.
+ *
+ * This option is useful for maps that map surrogate code *units* to
+ * special values optimized for UTF-16 string processing
+ * or for special error behavior for unpaired surrogates,
+ * but those values are not to be associated with the lead surrogate code *points*.
+ * @stable ICU 63
+ */
+ UcpMapRangeFixedAllSurrogates
+)
+
+/**
+ * Callback function type: Modifies a map value.
+ * Optionally called by ucpmap_getRange()/ucptrie_getRange()/umutablecptrie_getRange().
+ * The modified value will be returned by the getRange function.
+ *
+ * Can be used to ignore some of the value bits,
+ * make a filter for one of several values,
+ * return a value index computed from the map value, etc.
+ *
+ * @param context an opaque pointer, as passed into the getRange function
+ * @param value a value from the map
+ * @return the modified value
+ * @stable ICU 63
+ */
+type UcpMapValueFilter func(value uint32) uint32
+
+/**
+ * GetRange returns the last code point such that all those from start to there have the same value.
+ * Can be used to efficiently iterate over all same-value ranges in a trie.
+ * (This is normally faster than iterating over code points and get()ting each value,
+ * but much slower than a data structure that stores ranges directly.)
+ *
+ * If the UCPMapValueFilter function pointer is not NULL, then
+ * the value to be delivered is passed through that function, and the return value is the end
+ * of the range where all values are modified to the same actual value.
+ * The value is unchanged if that function pointer is NULL.
+ *
+ * Example:
+ * \code
+ * UChar32 start = 0, end;
+ * uint32_t value;
+ * while ((end = ucptrie_getRange(trie, start, UCPMAP_RANGE_NORMAL, 0,
+ * NULL, NULL, &value)) >= 0) {
+ * // Work with the range start..end and its value.
+ * start = end + 1;
+ * }
+ * \endcode
+ *
+ * @param trie the trie
+ * @param start range start
+ * @param option defines whether surrogates are treated normally,
+ * or as having the surrogateValue; usually UCPMAP_RANGE_NORMAL
+ * @param surrogateValue value for surrogates; ignored if option==UCPMAP_RANGE_NORMAL
+ * @param filter a pointer to a function that may modify the trie data value,
+ * or NULL if the values from the trie are to be used unmodified
+ * @param context an opaque pointer that is passed on to the filter function
+ * @param pValue if not NULL, receives the value that every code point start..end has;
+ * may have been modified by filter(context, trie value)
+ * if that function pointer is not NULL
+ * @return the range end code point, or -1 if start is not a valid code point
+ * @stable ICU 63
+ */
+// GetRange implements the documented range enumeration (see the
+// comment block above). For the two "fixed surrogate" options it
+// post-processes the plain getRange result so that lead surrogates
+// (or all surrogates) report surrogateValue instead of their stored
+// values. The fix-up logic below is order-sensitive; each early
+// return corresponds to one geometric case of how the returned range
+// overlaps U+D800..surrEnd.
+func (t *UcpTrie) GetRange(start rune, option UcpMapRangeOption, surrogateValue uint32, filter UcpMapValueFilter) (rune, uint32) {
+ if option == UcpMapRangeNormal {
+ return t.getRange(start, filter)
+ }
+
+ // surrEnd is the last code point treated as "fixed": the lead
+ // surrogates only, or the whole surrogate block.
+ var surrEnd rune
+ if option == UcpMapRangeFixedAllSurrogates {
+ surrEnd = 0xdfff
+ } else {
+ surrEnd = 0xdbff
+ }
+ end, value := t.getRange(start, filter)
+ if end < 0xd7ff || start > surrEnd {
+ // Range lies entirely before or after the fixed surrogate span.
+ return end, value
+ }
+ if value == surrogateValue {
+ if end >= surrEnd {
+ // Surrogates followed by a non-surrogateValue range,
+ // or surrogates are part of a larger surrogateValue range.
+ return end, value
+ }
+ } else {
+ if start <= 0xd7ff {
+ return 0xd7ff, value // Non-surrogateValue range ends before surrogateValue surrogates.
+ }
+ // Start is a surrogate with a non-surrogateValue code *unit* value.
+ // Return a surrogateValue code *point* range.
+ value = surrogateValue
+ if end > surrEnd {
+ return surrEnd, value // Surrogate range ends before non-surrogateValue rest of range.
+ }
+ }
+ // See if the surrogateValue surrogate range can be merged with
+ // an immediately following range.
+ end2, value2 := t.getRange(surrEnd+1, filter)
+ if value2 == surrogateValue {
+ return end2, value
+ }
+ return surrEnd, value
+}
+
+const maxUnicode = 0x10ffff
+
+// getRange is the core same-value range scanner: starting at start, it
+// walks the index structure block by block and returns the last code
+// point whose (optionally filtered) value equals start's, together
+// with that value. Returns (-1, 0) if start is out of range. The loop
+// exploits the block structure to skip whole null/repeated blocks; the
+// statement order (prev-block caching, haveValue latching) is what
+// makes those skips sound, so read it as a faithful port of ICU's
+// ucptrie_internalGetRange.
+func (t *UcpTrie) getRange(start rune, filter UcpMapValueFilter) (rune, uint32) {
+ if start > maxUnicode {
+ return -1, 0
+ }
+
+ if start >= t.highStart {
+ // Everything from highStart up shares the stored high value.
+ di := t.dataLength - highValueNegDataOffset
+ value := t.getValue(di)
+ if filter != nil {
+ value = filter(value)
+ }
+ return maxUnicode, value
+ }
+
+ nullValue := t.nullValue
+ if filter != nil {
+ nullValue = filter(nullValue)
+ }
+ index := t.index
+
+ prevI3Block := int32(-1)
+ prevBlock := int32(-1)
+ c := start
+ var trieValue uint32
+ value := nullValue
+ haveValue := false
+ for {
+ var i3Block, i3, i3BlockLength, dataBlockLength int32
+ if c <= 0xffff && (t.typ == typeFast || c <= smallMax) {
+ // Fast region: the index itself acts as the index-3 table.
+ i3Block = 0
+ i3 = c >> fastShift
+ if t.typ == typeFast {
+ i3BlockLength = bmpIndexLength
+ } else {
+ i3BlockLength = smallIndexLength
+ }
+ dataBlockLength = fastDataBlockLength
+ } else {
+ // Use the multi-stage index.
+ i1 := c >> ucpShift1
+ if t.typ == typeFast {
+ i1 += bmpIndexLength - ucpOmittedBmpIndex1Length
+ } else {
+ i1 += smallIndexLength
+ }
+ shft := c >> ucpShift2
+ idx := int32(t.index[i1]) + (shft & ucpIndex2Mask)
+ i3Block = int32(t.index[idx])
+ if i3Block == prevI3Block && (c-start) >= ucpCpPerIndex2Entry {
+ // The index-3 block is the same as the previous one, and filled with value.
+ c += ucpCpPerIndex2Entry
+ continue
+ }
+ prevI3Block = i3Block
+ if i3Block == int32(t.index3NullOffset) {
+ // This is the index-3 null block.
+ if haveValue {
+ if nullValue != value {
+ return c - 1, value
+ }
+ } else {
+ trieValue = t.nullValue
+ value = nullValue
+ haveValue = true
+ }
+ prevBlock = t.dataNullOffset
+ // Advance to the start of the next index-2 entry's span.
+ c = (c + ucpCpPerIndex2Entry) & ^(ucpCpPerIndex2Entry - 1)
+ continue
+ }
+ i3 = (c >> ucpShift3) & ucpIndex3Mask
+ i3BlockLength = ucpIndex3BlockLength
+ dataBlockLength = ucpSmallDataBlockLength
+ }
+
+ // Enumerate data blocks for one index-3 block.
+ for {
+ var block int32
+ if (i3Block & 0x8000) == 0 {
+ block = int32(index[i3Block+i3])
+ } else {
+ // 18-bit indexes stored in groups of 9 entries per 8 indexes.
+ group := (i3Block & 0x7fff) + (i3 & ^7) + (i3 >> 3)
+ gi := i3 & 7
+ block = (int32(index[group]) << (2 + (2 * gi))) & 0x30000
+ group++
+ block |= int32(index[group+gi])
+ }
+ if block == prevBlock && (c-start) >= dataBlockLength {
+ // The block is the same as the previous one, and filled with value.
+ c += dataBlockLength
+ } else {
+ dataMask := dataBlockLength - 1
+ prevBlock = block
+ if block == t.dataNullOffset {
+ // This is the data null block.
+ if haveValue {
+ if nullValue != value {
+ return c - 1, value
+ }
+ } else {
+ trieValue = t.nullValue
+ value = nullValue
+ haveValue = true
+ }
+ c = (c + dataBlockLength) & ^dataMask
+ } else {
+ // Scan individual entries of a real data block.
+ di := block + (c & dataMask)
+ trieValue2 := t.getValue(di)
+ if haveValue {
+ if trieValue2 != trieValue {
+ if filter == nil || maybeFilterValue(trieValue2, t.nullValue, nullValue, filter) != value {
+ return c - 1, value
+ }
+ trieValue = trieValue2 // may or may not help
+ }
+ } else {
+ trieValue = trieValue2
+ value = maybeFilterValue(trieValue2, t.nullValue, nullValue, filter)
+ haveValue = true
+ }
+ for {
+ c++
+ if c&dataMask == 0 {
+ break
+ }
+ di++
+ trieValue2 = t.getValue(di)
+ if trieValue2 != trieValue {
+ if filter == nil || maybeFilterValue(trieValue2, t.nullValue, nullValue, filter) != value {
+ return c - 1, value
+ }
+ trieValue = trieValue2 // may or may not help
+ }
+ }
+ }
+ }
+ i3++
+ if i3 >= i3BlockLength {
+ break
+ }
+ }
+ if c >= t.highStart {
+ break
+ }
+ }
+
+ // Reached highStart: the range extends to U+10FFFF only if the high
+ // value also matches.
+ di := t.dataLength - highValueNegDataOffset
+ highValue := t.getValue(di)
+ if maybeFilterValue(highValue, t.nullValue, nullValue, filter) != value {
+ return c - 1, value
+ }
+ return maxUnicode, value
+}
+
+// maybeFilterValue normalizes a raw trie value for range comparison:
+// the trie's own null value maps to the (already filtered) nullValue,
+// and any other value is passed through filter when one is supplied.
+func maybeFilterValue(value uint32, trieNullValue uint32, nullValue uint32, filter UcpMapValueFilter) uint32 {
+ if value == trieNullValue {
+ return nullValue
+ }
+ if filter != nil {
+ return filter(value)
+ }
+ return value
+}
diff --git a/go/mysql/icuregex/internal/utrie/utrie2.go b/go/mysql/icuregex/internal/utrie/utrie2.go
new file mode 100644
index 00000000000..2a474356b97
--- /dev/null
+++ b/go/mysql/icuregex/internal/utrie/utrie2.go
@@ -0,0 +1,433 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package utrie
+
+import (
+ "errors"
+ "fmt"
+
+ "vitess.io/vitess/go/mysql/icuregex/internal/udata"
+ "vitess.io/vitess/go/mysql/icuregex/internal/utf16"
+)
+
+// UTrie2 is an immutable, frozen version-2 Unicode trie (ICU's
+// UTrie2), mapping code points to 16- or 32-bit values. Instances are
+// deserialized by UTrie2FromBytes; exactly one of data16/data32 is
+// set. For 16-bit tries the data array is stored inside index, and
+// data16 aliases index[indexLength:] (see UTrie2FromBytes).
+type UTrie2 struct {
+ index []uint16
+ data16 []uint16
+ data32 []uint32
+
+ indexLength, dataLength int
+ index2NullOffset uint16 // offset of the shared null index-2 block
+ dataNullOffset uint16 // offset of the shared null data block
+ InitialValue uint32 // value for code points not explicitly set
+ ErrorValue uint32 // value for out-of-range / ill-formed input
+
+ // HighStart is the first code point of the single-value range
+ // ending at U+10FFFF; HighValueIndex locates that value.
+ HighStart rune
+ HighValueIndex int
+}
+
+// SerializedLength returns the byte length of the trie's serialized
+// form: a 16-byte header plus 2 bytes per index/data unit.
+// NOTE(review): the *2 assumes 16-bit data units; for a 32-bit trie
+// (data32 != nil) this would undercount — confirm callers only use it
+// with 16-bit tries.
+func (t *UTrie2) SerializedLength() int32 {
+ return 16 + int32(t.indexLength+t.dataLength)*2
+}
+
+// getIndex resolves c through indexFromCp and reads the resulting slot
+// from the index array, which for 16-bit tries also contains the data
+// (see UTrie2FromBytes). asciiOffset biases the error slot for
+// out-of-range input.
+func (t *UTrie2) getIndex(asciiOffset int, c rune) uint16 {
+ return t.index[t.indexFromCp(asciiOffset, c)]
+}
+
+// Get16 returns the 16-bit trie value for code point c. The data of a
+// 16-bit trie starts at index[indexLength], so indexLength is the
+// asciiOffset used to reach the bad-UTF-8 error slot.
+func (t *UTrie2) Get16(c rune) uint16 {
+ return t.getIndex(t.indexLength, c)
+}
+
+// indexFromCp computes the index-array slot for code point c,
+// mirroring ICU's UTRIE2_GET16 macro chain:
+// - below the surrogates: direct two-stage lookup;
+// - BMP: same, except lead-surrogate code *points* (U+D800..U+DBFF)
+// are redirected to the special lscp index-2 block;
+// - above U+10FFFF: the bad-UTF-8 error slot (relative to asciiOffset);
+// - at or above HighStart: the single shared high value;
+// - otherwise: supplementary lookup via the index-1 table.
+func (t *UTrie2) indexFromCp(asciiOffset int, c rune) int {
+ switch {
+ case c < 0xd800:
+ return indexRaw(0, t.index, c)
+ case c <= 0xffff:
+ var offset int32
+ if c <= 0xdbff {
+ // Rebase lead surrogates onto the lscp index-2 block.
+ offset = lscpIndex2Offset - (0xd800 >> shift2)
+ }
+ return indexRaw(offset, t.index, c)
+ case c > 0x10ffff:
+ return asciiOffset + badUtf8DataOffset
+ case c >= t.HighStart:
+ return t.HighValueIndex
+ default:
+ return indexFromSupp(t.index, c)
+ }
+}
+
+// EnumRange is invoked once per contiguous same-value range during
+// enumeration; returning false stops the enumeration early.
+type EnumRange func(start, end rune, value uint32) bool
+
+// EnumValue maps a raw trie value to the value used for comparing and
+// reporting ranges (e.g. to mask out irrelevant bits).
+type EnumValue func(value uint32) uint32
+
+// Enum enumerates all same-value ranges over the full code point space
+// [U+0000, U+10FFFF], calling enumRange for each; enumValue (optional,
+// may be nil) transforms raw values before comparison.
+func (t *UTrie2) Enum(enumValue EnumValue, enumRange EnumRange) {
+ t.enumEitherTrie(0, 0x110000, enumValue, enumRange)
+}
+
+// enumSameValue is the identity EnumValue, substituted when the caller
+// passes a nil enumValue to the enumeration.
+func enumSameValue(value uint32) uint32 {
+ return value
+}
+
+func (t *UTrie2) enumEitherTrie(start, limit rune, enumValue EnumValue, enumRange EnumRange) {
+ if enumRange == nil {
+ return
+ }
+ if enumValue == nil {
+ enumValue = enumSameValue
+ }
+
+ /* frozen trie */
+ var (
+ idx = t.index
+ data32 = t.data32
+ index2NullOffset = int(t.index2NullOffset)
+ nullBlock = int(t.dataNullOffset)
+
+ c rune
+ prev = start
+ highStart = t.HighStart
+
+ /* get the enumeration value that corresponds to an initial-value trie data entry */
+ initialValue = enumValue(t.InitialValue)
+
+ /* set variables for previous range */
+ i2Block int
+ block int
+ prevI2Block = -1
+ prevBlock = -1
+ prevValue = uint32(0)
+ )
+
+ /* enumerate index-2 blocks */
+ for c = start; c < limit && c < highStart; {
+ /* Code point limit for iterating inside this i2Block. */
+ tempLimit := c + cpPerIndex1Entry
+ if limit < tempLimit {
+ tempLimit = limit
+ }
+ if c <= 0xffff {
+ if !utf16.IsSurrogate(c) {
+ i2Block = int(c >> shift2)
+ } else if utf16.IsSurrogateLead(c) {
+ /*
+ * Enumerate values for lead surrogate code points, not code units:
+ * This special block has half the normal length.
+ */
+ i2Block = lscpIndex2Offset
+ tempLimit = min(0xdc00, limit)
+ } else {
+ /*
+ * Switch back to the normal part of the index-2 table.
+ * Enumerate the second half of the surrogates block.
+ */
+ i2Block = 0xd800 >> shift2
+ tempLimit = min(0xe000, limit)
+ }
+ } else {
+ /* supplementary code points */
+ i2Block = int(idx[(index1Offset-omittedBmpIndex1Length)+(c>>shift1)])
+ if i2Block == prevI2Block && (c-prev) >= cpPerIndex1Entry {
+ /*
+ * The index-2 block is the same as the previous one, and filled with prevValue.
+ * Only possible for supplementary code points because the linear-BMP index-2
+ * table creates unique i2Block values.
+ */
+ c += cpPerIndex1Entry
+ continue
+ }
+ }
+ prevI2Block = i2Block
+ if i2Block == index2NullOffset {
+ /* this is the null index-2 block */
+ if prevValue != initialValue {
+ if prev < c && !enumRange(prev, c-1, prevValue) {
+ return
+ }
+ prevBlock = nullBlock
+ prev = c
+ prevValue = initialValue
+ }
+ c += cpPerIndex1Entry
+ } else {
+ /* enumerate data blocks for one index-2 block */
+ var i2Limit int
+ if (c >> shift1) == (tempLimit >> shift1) {
+ i2Limit = int(tempLimit>>shift2) & index2Mask
+ } else {
+ i2Limit = index2BlockLength
+ }
+ for i2 := int(c>>shift2) & index2Mask; i2 < i2Limit; i2++ {
+ block = int(idx[i2Block+i2] << indexShift)
+ if block == prevBlock && (c-prev) >= dataBlockLength {
+ /* the block is the same as the previous one, and filled with prevValue */
+ c += dataBlockLength
+ continue
+ }
+ prevBlock = block
+ if block == nullBlock {
+ /* this is the null data block */
+ if prevValue != initialValue {
+ if prev < c && !enumRange(prev, c-1, prevValue) {
+ return
+ }
+ prev = c
+ prevValue = initialValue
+ }
+ c += dataBlockLength
+ } else {
+ for j := 0; j < dataBlockLength; j++ {
+ var value uint32
+ if data32 != nil {
+ value = data32[block+j]
+ } else {
+ value = uint32(idx[block+j])
+ }
+ value = enumValue(value)
+ if value != prevValue {
+ if prev < c && !enumRange(prev, c-1, prevValue) {
+ return
+ }
+ prev = c
+ prevValue = value
+ }
+ c++
+ }
+ }
+ }
+ }
+ }
+
+ if c > limit {
+ c = limit /* could be higher if in the index2NullOffset */
+ } else if c < limit {
+ /* c==highStart>shift1)])
+ return (int(index[i1+int((c>>shift2)&index2Mask)]) << indexShift) + int(c&dataMask)
+}
+
+func indexRaw(offset int32, index []uint16, c rune) int {
+ return int(index[offset+(c>>shift2)]<> shift1
+
+ /** Number of code points per index-1 table entry. 2048=0x800 */
+ cpPerIndex1Entry = 1 << shift1
+
+ /** Number of entries in an index-2 block. 64=0x40 */
+ index2BlockLength = 1 << shift1min2
+
+ /** Mask for getting the lower bits for the in-index-2-block offset. */
+ index2Mask = index2BlockLength - 1
+
+ /** Number of entries in a data block. 32=0x20 */
+ dataBlockLength = 1 << shift2
+
+ /** Mask for getting the lower bits for the in-data-block offset. */
+ dataMask = dataBlockLength - 1
+
+ /**
+ * Shift size for shifting left the index array values.
+ * Increases possible data size with 16-bit index values at the cost
+ * of compactability.
+ * This requires data blocks to be aligned by UTRIE2_DATA_GRANULARITY.
+ */
+ indexShift = 2
+
+ /** The alignment size of a data block. Also the granularity for compaction. */
+ dataGranularity = 1 << indexShift
+
+ /* Fixed layout of the first part of the index array. ------------------- */
+
+ /**
+ * The part of the index-2 table for U+D800..U+DBFF stores values for
+ * lead surrogate code _units_ not code _points_.
+ * Values for lead surrogate code _points_ are indexed with this portion of the table.
+ * Length=32=0x20=0x400>>UTRIE2_SHIFT_2. (There are 1024=0x400 lead surrogates.)
+ */
+ lscpIndex2Offset = 0x10000 >> shift2
+ lscpIndex2Length = 0x400 >> shift2
+
+ /** Count the lengths of both BMP pieces. 2080=0x820 */
+ index2BmpLength = lscpIndex2Offset + lscpIndex2Length
+
+ /**
+ * The 2-byte UTF-8 version of the index-2 table follows at offset 2080=0x820.
+ * Length 32=0x20 for lead bytes C0..DF, regardless of UTRIE2_SHIFT_2.
+ */
+ utf82BIndex2Offset = index2BmpLength
+ utf82BIndex2Length = 0x800 >> 6 /* U+0800 is the first code point after 2-byte UTF-8 */
+
+ /**
+ * The index-1 table, only used for supplementary code points, at offset 2112=0x840.
+ * Variable length, for code points up to highStart, where the last single-value range starts.
+ * Maximum length 512=0x200=0x100000>>UTRIE2_SHIFT_1.
+ * (For 0x100000 supplementary code points U+10000..U+10ffff.)
+ *
+ * The part of the index-2 table for supplementary code points starts
+ * after this index-1 table.
+ *
+ * Both the index-1 table and the following part of the index-2 table
+ * are omitted completely if there is only BMP data.
+ */
+ index1Offset = utf82BIndex2Offset + utf82BIndex2Length
+ maxIndex1Length = 0x100000 >> shift1
+
+ /*
+ * Fixed layout of the first part of the data array. -----------------------
+ * Starts with 4 blocks (128=0x80 entries) for ASCII.
+ */
+
+ /**
+ * The illegal-UTF-8 data block follows the ASCII block, at offset 128=0x80.
+ * Used with linear access for single bytes 0..0xbf for simple error handling.
+ * Length 64=0x40, not UTRIE2_DATA_BLOCK_LENGTH.
+ */
+ badUtf8DataOffset = 0x80
+)
+
+// UTrie2FromBytes deserializes a frozen UTrie2 from its little-endian
+// binary form ("Tri2" signature). It returns an error for big-endian
+// data, a bad signature, or an unknown value width. For 16-bit tries
+// the data array is appended to the index array and data16 aliases
+// its tail; for 32-bit tries the data is read separately.
+func UTrie2FromBytes(bytes *udata.Bytes) (*UTrie2, error) {
+ type utrie2Header struct {
+ /** "Tri2" in big-endian US-ASCII (0x54726932) */
+ signature uint32
+
+ /**
+ * options bit field:
+ * 15.. 4 reserved (0)
+ * 3.. 0 UTrie2ValueBits valueBits
+ */
+ options uint16
+
+ /** UTRIE2_INDEX_1_OFFSET..UTRIE2_MAX_INDEX_LENGTH */
+ indexLength uint16
+
+ /** (UTRIE2_DATA_START_OFFSET..UTRIE2_MAX_DATA_LENGTH)>>UTRIE2_INDEX_SHIFT */
+ shiftedDataLength uint16
+
+ /** Null index and data blocks, not shifted. */
+ index2NullOffset, dataNullOffset uint16
+
+ /**
+ * First code point of the single-value range ending with U+10ffff,
+ * rounded up and then shifted right by UTRIE2_SHIFT_1.
+ */
+ shiftedHighStart uint16
+ }
+
+ var header utrie2Header
+ header.signature = bytes.Uint32()
+
+ switch header.signature {
+ case 0x54726932:
+ case 0x32697254:
+ return nil, errors.New("unsupported: BigEndian encoding")
+ default:
+ return nil, fmt.Errorf("invalid signature for Trie2: 0x%08x", header.signature)
+ }
+
+ header.options = bytes.Uint16()
+ header.indexLength = bytes.Uint16()
+ header.shiftedDataLength = bytes.Uint16()
+ header.index2NullOffset = bytes.Uint16()
+ header.dataNullOffset = bytes.Uint16()
+ header.shiftedHighStart = bytes.Uint16()
+
+ // Only widths 16 and 32 exist in the UTrie2 format.
+ var width int
+ switch header.options & 0xf {
+ case 0:
+ width = 16
+ case 1:
+ width = 32
+ default:
+ return nil, errors.New("invalid width for serialized UTrie2")
+ }
+
+ trie := &UTrie2{
+ indexLength: int(header.indexLength),
+ dataLength: int(header.shiftedDataLength) << indexShift,
+ index2NullOffset: header.index2NullOffset,
+ dataNullOffset: header.dataNullOffset,
+ HighStart: rune(header.shiftedHighStart) << shift1,
+ }
+
+ // The high value sits in the last data-granularity-aligned slot;
+ // for 16-bit tries data indices are relative to the index array.
+ trie.HighValueIndex = trie.dataLength - dataGranularity
+ if width == 16 {
+ trie.HighValueIndex += trie.indexLength
+ }
+
+ // 16-bit data is serialized contiguously after the index.
+ indexArraySize := trie.indexLength
+ if width == 16 {
+ indexArraySize += trie.dataLength
+ }
+
+ trie.index = bytes.Uint16Slice(int32(indexArraySize))
+
+ if width == 16 {
+ trie.data16 = trie.index[trie.indexLength:]
+ // For 16-bit tries the serialized dataNullOffset is already
+ // index-relative, while the error value lives in the data part.
+ trie.InitialValue = uint32(trie.index[trie.dataNullOffset])
+ trie.ErrorValue = uint32(trie.index[trie.indexLength+badUtf8DataOffset])
+ } else {
+ trie.data32 = bytes.Uint32Slice(int32(trie.dataLength))
+ trie.InitialValue = trie.data32[trie.dataNullOffset]
+ trie.ErrorValue = trie.data32[badUtf8DataOffset]
+ }
+
+ return trie, nil
+}
diff --git a/go/mysql/icuregex/matcher.go b/go/mysql/icuregex/matcher.go
new file mode 100644
index 00000000000..1b5495f495f
--- /dev/null
+++ b/go/mysql/icuregex/matcher.go
@@ -0,0 +1,1671 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package icuregex
+
+import (
+ "fmt"
+ "io"
+
+ "vitess.io/vitess/go/mysql/icuregex/internal/ucase"
+ "vitess.io/vitess/go/mysql/icuregex/internal/uchar"
+ "vitess.io/vitess/go/mysql/icuregex/internal/uprops"
+)
+
+const timerInitialValue = 10000
+const defaultTimeout = 3
+const defaultStackLimit = 0
+
+type Matcher struct {
+ pattern *Pattern
+
+ input []rune
+
+ regionStart int // Start of the input region, default = 0.
+ regionLimit int // End of input region, default to input.length.
+
+ anchorStart int // Region bounds for anchoring operations (^ or $).
+ anchorLimit int // See useAnchoringBounds
+
+ lookStart int // Region bounds for look-ahead/behind and
+ lookLimit int // and other boundary tests. See
+ // useTransparentBounds
+
+ activeStart int // Currently active bounds for matching.
+ activeLimit int // Usually is the same as region, but
+ // is changed to fLookStart/Limit when
+ // entering look around regions.
+
+ match bool // True if the last attempted match was successful.
+ matchStart int // Position of the start of the most recent match
+ matchEnd int // First position after the end of the most recent match
+ // Zero if no previous match, even when a region
+ // is active.
+ lastMatchEnd int // First position after the end of the previous match,
+ // or -1 if there was no previous match.
+ appendPosition int // First position after the end of the previous
+ // appendReplacement(). As described by the
+ // JavaDoc for Java Matcher, where it is called
+ // "append position"
+ hitEnd bool // True if the last match touched the end of input.
+ requireEnd bool // True if the last match required end-of-input
+ // (matched $ or Z)
+
+ stack stack
+ frame stackFrame // After finding a match, the last active stack frame,
+ // which will contain the capture group results.
+ // NOT valid while match engine is running.
+
+ data []int // Data area for use by the compiled pattern.
+
+ timeLimit int32 // Max time (in arbitrary steps) to let the
+ // match engine run. Zero for unlimited.
+
+ time int32 // Match time, accumulates while matching.
+ tickCounter int32 // Low bits counter for time. Counts down StateSaves.
+ // Kept separately from fTime to keep as much
+ // code as possible out of the inline
+ // StateSave function.
+
+ dumper io.Writer
+}
+
+func NewMatcher(pat *Pattern) *Matcher {
+ m := &Matcher{
+ pattern: pat,
+ data: make([]int, pat.dataSize),
+ stack: stack{
+ frameSize: pat.frameSize,
+ stackLimit: defaultStackLimit,
+ },
+ timeLimit: defaultTimeout,
+ }
+ m.reset()
+ return m
+}
+
+func (m *Matcher) MatchAt(startIdx int, toEnd bool) error {
+ //--------------------------------------------------------------------------------
+ //
+ // MatchAt This is the actual matching engine.
+ //
+	//      startIdx:    begin matching at this index.
+ // toEnd: if true, match must extend to end of the input region
+ //
+ //--------------------------------------------------------------------------------
+ var err error
+	var isMatch bool // True if we have a match.
+
+ if m.dumper != nil {
+ fmt.Fprintf(m.dumper, "MatchAt(startIdx=%d)\n", startIdx)
+ fmt.Fprintf(m.dumper, "Original Pattern: \"%s\"\n", m.pattern.pattern)
+ fmt.Fprintf(m.dumper, "Input String: \"%s\"\n\n", string(m.input))
+ }
+
+ pat := m.pattern.compiledPat
+ inputText := m.input
+ litText := m.pattern.literalText
+ sets := m.pattern.sets
+
+ fp := m.resetStack()
+ *fp.inputIdx() = startIdx
+ *fp.patIdx() = 0
+ for i := 0; i < len(m.data); i++ {
+ m.data[i] = 0
+ }
+
+ for {
+ op := pat[*fp.patIdx()]
+
+ if m.dumper != nil {
+ fmt.Fprintf(m.dumper, "inputIdx=%d inputChar=%x sp=%3d activeLimit=%d ", *fp.inputIdx(),
+ charAt(inputText, *fp.inputIdx()), m.stack.sp(), m.activeLimit)
+ m.pattern.dumpOp(m.dumper, *fp.patIdx())
+ }
+
+ *fp.patIdx()++
+
+ switch op.typ() {
+ case urxNop:
+ // Nothing to do.
+ case urxBacktrack:
+ // Force a backtrack. In some circumstances, the pattern compiler
+ // will notice that the pattern can't possibly match anything, and will
+ // emit one of these at that point.
+ fp = m.stack.popFrame()
+ case urxOnechar:
+ if *fp.inputIdx() < m.activeLimit {
+ c := charAt(inputText, *fp.inputIdx())
+ *fp.inputIdx()++
+ if c == rune(op.value()) {
+ break
+ }
+ } else {
+ m.hitEnd = true
+ }
+ fp = m.stack.popFrame()
+ case urxString:
+ // Test input against a literal string.
+ // Strings require two slots in the compiled pattern, one for the
+ // offset to the string text, and one for the length.
+ stringStartIdx := op.value()
+ nextOp := pat[*fp.patIdx()] // Fetch the second operand
+ *fp.patIdx()++
+ stringLen := nextOp.value()
+
+ patternString := litText[stringStartIdx:]
+ var patternStringIndex int
+ success := true
+ for patternStringIndex < stringLen {
+ if *fp.inputIdx() >= m.activeLimit {
+ m.hitEnd = true
+ success = false
+ break
+ }
+ if charAt(patternString, patternStringIndex) != charAt(inputText, *fp.inputIdx()) {
+ success = false
+ break
+ }
+ patternStringIndex++
+ *fp.inputIdx()++
+ }
+
+ if !success {
+ fp = m.stack.popFrame()
+ }
+ case urxStateSave:
+ fp, err = m.stateSave(*fp.inputIdx(), op.value())
+ if err != nil {
+ return err
+ }
+ case urxEnd:
+ // The match loop will exit via this path on a successful match,
+ // when we reach the end of the pattern.
+ if toEnd && *fp.inputIdx() != m.activeLimit {
+ // The pattern matched, but not to the end of input. Try some more.
+ fp = m.stack.popFrame()
+ break
+ }
+ isMatch = true
+ goto breakFromLoop
+
+			// Start and End Capture stack frame variables are laid out like this:
+ // fp->fExtra[opValue] - The start of a completed capture group
+ // opValue+1 - The end of a completed capture group
+ // opValue+2 - the start of a capture group whose end
+ // has not yet been reached (and might not ever be).
+ case urxStartCapture:
+ *fp.extra(op.value() + 2) = *fp.inputIdx()
+ case urxEndCapture:
+ *fp.extra(op.value()) = *fp.extra(op.value() + 2) // Tentative start becomes real.
+ *fp.extra(op.value() + 1) = *fp.inputIdx() // End position
+
+ case urxDollar: // $, test for End of line
+ if *fp.inputIdx() < m.anchorLimit-2 {
+ fp = m.stack.popFrame()
+ break
+ }
+ // or for position before new line at end of input
+ if *fp.inputIdx() >= m.anchorLimit {
+ // We really are at the end of input. Success.
+ m.hitEnd = true
+ m.requireEnd = true
+ break
+ }
+
+ if *fp.inputIdx() == m.anchorLimit-1 {
+ c := m.input[*fp.inputIdx()]
+ if isLineTerminator(c) {
+ if !(c == 0x0a && *fp.inputIdx() > m.anchorStart && m.input[*fp.inputIdx()-1] == 0x0d) {
+ // At new-line at end of input. Success
+ m.hitEnd = true
+ m.requireEnd = true
+ break
+ }
+ }
+ } else if *fp.inputIdx() == m.anchorLimit-2 && m.input[*fp.inputIdx()] == 0x0d && m.input[*fp.inputIdx()+1] == 0x0a {
+ m.hitEnd = true
+ m.requireEnd = true
+ break // At CR/LF at end of input. Success
+ }
+ fp = m.stack.popFrame()
+
+ case urxDollarD: // $, test for End of Line, in UNIX_LINES mode.
+ if *fp.inputIdx() >= m.anchorLimit {
+ // Off the end of input. Success.
+ m.hitEnd = true
+ m.requireEnd = true
+ break
+ }
+ c := charAt(inputText, *fp.inputIdx())
+ *fp.inputIdx()++
+ // Either at the last character of input, or off the end.
+ if c == 0x0a && *fp.inputIdx() == m.anchorLimit {
+ m.hitEnd = true
+ m.requireEnd = true
+ break
+ }
+
+ // Not at end of input. Back-track out.
+ fp = m.stack.popFrame()
+ case urxDollarM: // $, test for End of line in multi-line mode
+ if *fp.inputIdx() >= m.anchorLimit {
+ // We really are at the end of input. Success.
+ m.hitEnd = true
+ m.requireEnd = true
+ break
+ }
+ // If we are positioned just before a new-line, succeed.
+ // It makes no difference where the new-line is within the input.
+ c := charAt(inputText, *fp.inputIdx())
+ if isLineTerminator(c) {
+ // At a line end, except for the odd chance of being in the middle of a CR/LF sequence
+ // In multi-line mode, hitting a new-line just before the end of input does not
+ // set the hitEnd or requireEnd flags
+ if !(c == 0x0a && *fp.inputIdx() > m.anchorStart && charAt(inputText, *fp.inputIdx()-1) == 0x0d) {
+ break
+ }
+ }
+ // not at a new line. Fail.
+ fp = m.stack.popFrame()
+ case urxDollarMd: // $, test for End of line in multi-line and UNIX_LINES mode
+ if *fp.inputIdx() >= m.anchorLimit {
+ // We really are at the end of input. Success.
+ m.hitEnd = true
+ m.requireEnd = true // Java set requireEnd in this case, even though
+ break // adding a new-line would not lose the match.
+ }
+ // If we are not positioned just before a new-line, the test fails; backtrack out.
+ // It makes no difference where the new-line is within the input.
+ if charAt(inputText, *fp.inputIdx()) != 0x0a {
+ fp = m.stack.popFrame()
+ }
+ case urxCaret: // ^, test for start of line
+ if *fp.inputIdx() != m.anchorStart {
+ fp = m.stack.popFrame()
+ }
+		case urxCaretM: // ^, test for start of line in multi-line mode
+ if *fp.inputIdx() == m.anchorStart {
+ // We are at the start input. Success.
+ break
+ }
+ // Check whether character just before the current pos is a new-line
+ // unless we are at the end of input
+ c := charAt(inputText, *fp.inputIdx()-1)
+ if (*fp.inputIdx() < m.anchorLimit) && isLineTerminator(c) {
+ // It's a new-line. ^ is true. Success.
+ // TODO: what should be done with positions between a CR and LF?
+ break
+ }
+ // Not at the start of a line. Fail.
+ fp = m.stack.popFrame()
+		case urxCaretMUnix: // ^, test for start of line in multi-line + Unix-line mode
+ if *fp.inputIdx() <= m.anchorStart {
+ // We are at the start input. Success.
+ break
+ }
+
+ c := charAt(inputText, *fp.inputIdx()-1)
+ if c != 0x0a {
+ // Not at the start of a line. Back-track out.
+ fp = m.stack.popFrame()
+ }
+ case urxBackslashB: // Test for word boundaries
+ success := m.isWordBoundary(*fp.inputIdx())
+ success = success != (op.value() != 0) // flip sense for \B
+ if !success {
+ fp = m.stack.popFrame()
+ }
+ case urxBackslashBu: // Test for word boundaries, Unicode-style
+ success := m.isUWordBoundary(*fp.inputIdx())
+ success = success != (op.value() != 0) // flip sense for \B
+ if !success {
+ fp = m.stack.popFrame()
+ }
+ case urxBackslashD: // Test for decimal digit
+ if *fp.inputIdx() >= m.activeLimit {
+ m.hitEnd = true
+ fp = m.stack.popFrame()
+ break
+ }
+
+ c := charAt(inputText, *fp.inputIdx())
+
+ success := m.isDecimalDigit(c)
+ success = success != (op.value() != 0) // flip sense for \D
+ if success {
+ *fp.inputIdx()++
+ } else {
+ fp = m.stack.popFrame()
+ }
+
+ case urxBackslashG: // Test for position at end of previous match
+ if !((m.match && *fp.inputIdx() == m.matchEnd) || (!m.match && *fp.inputIdx() == m.activeStart)) {
+ fp = m.stack.popFrame()
+ }
+
+ case urxBackslashH: // Test for \h, horizontal white space.
+ if *fp.inputIdx() >= m.activeLimit {
+ m.hitEnd = true
+ fp = m.stack.popFrame()
+ break
+ }
+
+ c := charAt(inputText, *fp.inputIdx())
+ success := m.isHorizWS(c) || c == 9
+ success = success != (op.value() != 0) // flip sense for \H
+ if success {
+ *fp.inputIdx()++
+ } else {
+ fp = m.stack.popFrame()
+ }
+
+ case urxBackslashR: // Test for \R, any line break sequence.
+ if *fp.inputIdx() >= m.activeLimit {
+ m.hitEnd = true
+ fp = m.stack.popFrame()
+ break
+ }
+ c := charAt(inputText, *fp.inputIdx())
+ if isLineTerminator(c) {
+ if c == 0x0d && charAt(inputText, *fp.inputIdx()+1) == 0x0a {
+ *fp.inputIdx()++
+ }
+ *fp.inputIdx()++
+ } else {
+ fp = m.stack.popFrame()
+ }
+
+ case urxBackslashV: // \v, any single line ending character.
+ if *fp.inputIdx() >= m.activeLimit {
+ m.hitEnd = true
+ fp = m.stack.popFrame()
+ break
+ }
+ c := charAt(inputText, *fp.inputIdx())
+ success := isLineTerminator(c)
+ success = success != (op.value() != 0) // flip sense for \V
+ if success {
+ *fp.inputIdx()++
+ } else {
+ fp = m.stack.popFrame()
+ }
+
+ case urxBackslashX:
+ // Match a Grapheme, as defined by Unicode UAX 29.
+
+ // Fail if at end of input
+ if *fp.inputIdx() >= m.activeLimit {
+ m.hitEnd = true
+ fp = m.stack.popFrame()
+ break
+ }
+
+ *fp.inputIdx() = m.followingGCBoundary(*fp.inputIdx())
+ if *fp.inputIdx() >= m.activeLimit {
+ m.hitEnd = true
+ *fp.inputIdx() = m.activeLimit
+ }
+
+ case urxBackslashZ: // Test for end of Input
+ if *fp.inputIdx() < m.anchorLimit {
+ fp = m.stack.popFrame()
+ } else {
+ m.hitEnd = true
+ m.requireEnd = true
+ }
+ case urxStaticSetref:
+ // Test input character against one of the predefined sets
+ // (Word Characters, for example)
+ // The high bit of the op value is a flag for the match polarity.
+ // 0: success if input char is in set.
+ // 1: success if input char is not in set.
+ if *fp.inputIdx() >= m.activeLimit {
+ m.hitEnd = true
+ fp = m.stack.popFrame()
+ break
+ }
+
+ success := (op.value() & urxNegSet) == urxNegSet
+ negOp := op.value() & ^urxNegSet
+
+ c := charAt(inputText, *fp.inputIdx())
+ s := staticPropertySets[negOp]
+ if s.ContainsRune(c) {
+ success = !success
+ }
+
+ if success {
+ *fp.inputIdx()++
+ } else {
+ // the character wasn't in the set.
+ fp = m.stack.popFrame()
+ }
+ case urxStatSetrefN:
+ // Test input character for NOT being a member of one of
+ // the predefined sets (Word Characters, for example)
+ if *fp.inputIdx() >= m.activeLimit {
+ m.hitEnd = true
+ fp = m.stack.popFrame()
+ break
+ }
+
+ c := charAt(inputText, *fp.inputIdx())
+ s := staticPropertySets[op.value()]
+ if !s.ContainsRune(c) {
+ *fp.inputIdx()++
+ break
+ }
+ // the character wasn't in the set.
+ fp = m.stack.popFrame()
+
+ case urxSetref:
+ if *fp.inputIdx() >= m.activeLimit {
+ m.hitEnd = true
+ fp = m.stack.popFrame()
+ break
+ }
+
+ // There is input left. Pick up one char and test it for set membership.
+ c := charAt(inputText, *fp.inputIdx())
+
+ s := sets[op.value()]
+ if s.ContainsRune(c) {
+ *fp.inputIdx()++
+ break
+ }
+
+ // the character wasn't in the set.
+ fp = m.stack.popFrame()
+
+ case urxDotany:
+ // . matches anything, but stops at end-of-line.
+ if *fp.inputIdx() >= m.activeLimit {
+ m.hitEnd = true
+ fp = m.stack.popFrame()
+ break
+ }
+
+ c := charAt(inputText, *fp.inputIdx())
+ if isLineTerminator(c) {
+ // End of line in normal mode. . does not match.
+ fp = m.stack.popFrame()
+ break
+ }
+ *fp.inputIdx()++
+
+ case urxDotanyAll:
+ // ., in dot-matches-all (including new lines) mode
+ if *fp.inputIdx() >= m.activeLimit {
+ // At end of input. Match failed. Backtrack out.
+ m.hitEnd = true
+ fp = m.stack.popFrame()
+ break
+ }
+
+ c := charAt(inputText, *fp.inputIdx())
+ *fp.inputIdx()++
+ if c == 0x0d && *fp.inputIdx() < m.activeLimit {
+ // In the case of a CR/LF, we need to advance over both.
+ nextc := charAt(inputText, *fp.inputIdx())
+ if nextc == 0x0a {
+ *fp.inputIdx()++
+ }
+ }
+
+ case urxDotanyUnix:
+ // '.' operator, matches all, but stops at end-of-line.
+ // UNIX_LINES mode, so 0x0a is the only recognized line ending.
+ if *fp.inputIdx() >= m.activeLimit {
+ // At end of input. Match failed. Backtrack out.
+ m.hitEnd = true
+ fp = m.stack.popFrame()
+ break
+ }
+
+ // There is input left. Advance over one char, unless we've hit end-of-line
+ c := charAt(inputText, *fp.inputIdx())
+ if c == 0x0a {
+ // End of line in normal mode. '.' does not match the \n
+ fp = m.stack.popFrame()
+ } else {
+ *fp.inputIdx()++
+ }
+ case urxJmp:
+ *fp.patIdx() = op.value()
+
+ case urxFail:
+ isMatch = false
+ goto breakFromLoop
+
+ case urxJmpSav:
+ fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()) // State save to loc following current
+ if err != nil {
+ return err
+ }
+ *fp.patIdx() = op.value() // Then JMP.
+
+ case urxJmpSavX:
+ // This opcode is used with (x)+, when x can match a zero length string.
+ // Same as JMP_SAV, except conditional on the match having made forward progress.
+ // Destination of the JMP must be a URX_STO_INP_LOC, from which we get the
+ // data address of the input position at the start of the loop.
+ stoOp := pat[op.value()-1]
+ frameLoc := stoOp.value()
+
+ prevInputIdx := *fp.extra(frameLoc)
+ if prevInputIdx < *fp.inputIdx() {
+ // The match did make progress. Repeat the loop.
+ fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()) // State save to loc following current
+ if err != nil {
+ return err
+ }
+ *fp.patIdx() = op.value() // Then JMP.
+ *fp.extra(frameLoc) = *fp.inputIdx()
+ }
+ // If the input position did not advance, we do nothing here,
+ // execution will fall out of the loop.
+
+ case urxCtrInit:
+ *fp.extra(op.value()) = 0 // Set the loop counter variable to zero
+
+ // Pick up the three extra operands that CTR_INIT has, and
+ // skip the pattern location counter past
+ instOperandLoc := *fp.patIdx()
+ *fp.patIdx() += 3 // Skip over the three operands that CTR_INIT has.
+
+ loopLoc := pat[instOperandLoc].value()
+ minCount := int(pat[instOperandLoc+1])
+ maxCount := int(pat[instOperandLoc+2])
+
+ if minCount == 0 {
+ fp, err = m.stateSave(*fp.inputIdx(), loopLoc+1)
+ if err != nil {
+ return err
+ }
+ }
+ if maxCount == -1 {
+ *fp.extra(op.value() + 1) = *fp.inputIdx() // For loop breaking.
+ } else if maxCount == 0 {
+ fp = m.stack.popFrame()
+ }
+
+ case utxCtrLoop:
+ initOp := pat[op.value()]
+ opValue := initOp.value()
+ pCounter := fp.extra(opValue)
+ minCount := int(pat[op.value()+2])
+ maxCount := int(pat[op.value()+3])
+ *pCounter++
+ if *pCounter >= maxCount && maxCount != -1 {
+ break
+ }
+
+ if *pCounter >= minCount {
+ if maxCount == -1 {
+ // Loop has no hard upper bound.
+ // Check that it is progressing through the input, break if it is not.
+ pLastIntputIdx := fp.extra(opValue + 1)
+ if *pLastIntputIdx == *fp.inputIdx() {
+ break
+ }
+ *pLastIntputIdx = *fp.inputIdx()
+ }
+ fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx())
+ if err != nil {
+ return err
+ }
+ } else {
+ // Increment time-out counter. (StateSave() does it if count >= minCount)
+ m.tickCounter--
+ if m.tickCounter <= 0 {
+ if err = m.incrementTime(*fp.inputIdx()); err != nil {
+ return err
+ } // Re-initializes fTickCounter
+ }
+ }
+
+ *fp.patIdx() = op.value() + 4 // Loop back.
+
+ case urxCtrInitNg:
+ *fp.extra(op.value()) = 0 // Set the loop counter variable to zero
+
+ // Pick up the three extra operands that CTR_INIT_NG has, and
+ // skip the pattern location counter past
+ instrOperandLoc := *fp.patIdx()
+ *fp.patIdx() += 3
+ loopLoc := pat[instrOperandLoc].value()
+ minCount := pat[instrOperandLoc+1].value()
+ maxCount := pat[instrOperandLoc+2].value()
+
+ if maxCount == -1 {
+ *fp.extra(op.value() + 1) = *fp.inputIdx() // Save initial input index for loop breaking.
+ }
+
+ if minCount == 0 {
+ if maxCount != 0 {
+ fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx())
+ if err != nil {
+ return err
+ }
+ }
+ *fp.patIdx() = loopLoc + 1
+ }
+
+ case urxCtrLoopNg:
+ initOp := pat[op.value()]
+ pCounter := fp.extra(initOp.value())
+ minCount := int(pat[op.value()+2])
+ maxCount := int(pat[op.value()+3])
+ *pCounter++
+ if *pCounter >= maxCount && maxCount != -1 {
+ // The loop has matched the maximum permitted number of times.
+ // Break out of here with no action. Matching will
+ // continue with the following pattern.
+ break
+ }
+
+ if *pCounter < minCount {
+ // We haven't met the minimum number of matches yet.
+ // Loop back for another one.
+ *fp.patIdx() = op.value() + 4 // Loop back.
+ // Increment time-out counter. (StateSave() does it if count >= minCount)
+ m.tickCounter--
+ if m.tickCounter <= 0 {
+ if err = m.incrementTime(*fp.inputIdx()); err != nil {
+ return err
+ } // Re-initializes fTickCounter
+ }
+ } else {
+ // We do have the minimum number of matches.
+
+ // If there is no upper bound on the loop iterations, check that the input index
+ // is progressing, and stop the loop if it is not.
+ if maxCount == -1 {
+ lastInputIdx := fp.extra(initOp.value() + 1)
+ if *fp.inputIdx() == *lastInputIdx {
+ break
+ }
+ *lastInputIdx = *fp.inputIdx()
+ }
+ }
+
+ // Loop Continuation: we will fall into the pattern following the loop
+ // (non-greedy, don't execute loop body first), but first do
+ // a state save to the top of the loop, so that a match failure
+ // in the following pattern will try another iteration of the loop.
+ fp, err = m.stateSave(*fp.inputIdx(), op.value()+4)
+ if err != nil {
+ return err
+ }
+
+ case urxStoSp:
+ m.data[op.value()] = m.stack.len()
+
+ case urxLdSp:
+ newStackSize := m.data[op.value()]
+ newFp := m.stack.offset(newStackSize)
+ if newFp.equals(fp) {
+ break
+ }
+ copy(newFp, fp)
+ fp = newFp
+
+ m.stack.setSize(newStackSize)
+ case urxBackref:
+ groupStartIdx := *fp.extra(op.value())
+ groupEndIdx := *fp.extra(op.value() + 1)
+
+ if groupStartIdx < 0 {
+ // This capture group has not participated in the match thus far,
+ fp = m.stack.popFrame() // FAIL, no match.
+ break
+ }
+
+ success := true
+ for {
+ if groupStartIdx >= groupEndIdx {
+ success = true
+ break
+ }
+
+ if *fp.inputIdx() >= m.activeLimit {
+ success = false
+ m.hitEnd = true
+ break
+ }
+
+ captureGroupChar := charAt(inputText, groupStartIdx)
+ inputChar := charAt(inputText, *fp.inputIdx())
+ groupStartIdx++
+ *fp.inputIdx()++
+ if inputChar != captureGroupChar {
+ success = false
+ break
+ }
+ }
+
+ if !success {
+ fp = m.stack.popFrame()
+ }
+ case urxBackrefI:
+ groupStartIdx := *fp.extra(op.value())
+ groupEndIdx := *fp.extra(op.value() + 1)
+
+ if groupStartIdx < 0 {
+ // This capture group has not participated in the match thus far,
+ fp = m.stack.popFrame() // FAIL, no match.
+ break
+ }
+
+ captureGroupItr := newCaseFoldIterator(m.input, groupStartIdx, groupEndIdx)
+ inputItr := newCaseFoldIterator(m.input, *fp.inputIdx(), m.activeLimit)
+ success := true
+
+ for {
+ captureGroupChar := captureGroupItr.next()
+ if captureGroupChar == -1 {
+ success = true
+ break
+ }
+ inputChar := inputItr.next()
+ if inputChar == -1 {
+ success = false
+ m.hitEnd = true
+ break
+ }
+ if inputChar != captureGroupChar {
+ success = false
+ break
+ }
+ }
+
+ if success && inputItr.inExpansion() {
+				// We obtained a match by consuming part of a string obtained from
+ // case-folding a single code point of the input text.
+ // This does not count as an overall match.
+ success = false
+ }
+
+ if success {
+ *fp.inputIdx() = inputItr.index
+ } else {
+ fp = m.stack.popFrame()
+ }
+
+ case urxStoInpLoc:
+ *fp.extra(op.value()) = *fp.inputIdx()
+
+ case urxJmpx:
+ instrOperandLoc := *fp.patIdx()
+ *fp.patIdx()++
+ dataLoc := pat[instrOperandLoc].value()
+
+ saveInputIdx := *fp.extra(dataLoc)
+
+ if saveInputIdx < *fp.inputIdx() {
+ *fp.patIdx() = op.value() // JMP
+ } else {
+ fp = m.stack.popFrame() // FAIL, no progress in loop.
+ }
+
+ case urxLaStart:
+ m.data[op.value()] = m.stack.len()
+ m.data[op.value()+1] = *fp.inputIdx()
+ m.data[op.value()+2] = m.activeStart
+ m.data[op.value()+3] = m.activeLimit
+ m.activeStart = m.lookStart // Set the match region change for
+ m.activeLimit = m.lookLimit // transparent bounds.
+
+ case urxLaEnd:
+ stackSize := m.stack.len()
+ newStackSize := m.data[op.value()]
+ if stackSize > newStackSize {
+ // Copy the current top frame back to the new (cut back) top frame.
+ // This makes the capture groups from within the look-ahead
+ // expression available.
+ newFp := m.stack.offset(newStackSize)
+ copy(newFp, fp)
+ fp = newFp
+ m.stack.setSize(newStackSize)
+ }
+
+ *fp.inputIdx() = m.data[op.value()+1]
+
+ m.activeStart = m.data[op.value()+2]
+ m.activeLimit = m.data[op.value()+3]
+
+ case urcOnecharI:
+ // Case insensitive one char. The char from the pattern is already case folded.
+ // Input text is not, but case folding the input can not reduce two or more code
+ // points to one.
+ if *fp.inputIdx() < m.activeLimit {
+ c := charAt(inputText, *fp.inputIdx())
+ if ucase.Fold(c) == op.value32() {
+ *fp.inputIdx()++
+ break
+ }
+ } else {
+ m.hitEnd = true
+ }
+
+ fp = m.stack.popFrame()
+
+ case urxStringI:
+ // Case-insensitive test input against a literal string.
+ // Strings require two slots in the compiled pattern, one for the
+ // offset to the string text, and one for the length.
+ // The compiled string has already been case folded.
+ patternString := litText[op.value():]
+ var patternStringIdx int
+ nextOp := pat[*fp.patIdx()]
+ *fp.patIdx()++
+ patternStringLen := nextOp.value()
+
+ success := true
+
+ it := newCaseFoldIterator(inputText, *fp.inputIdx(), m.activeLimit)
+ for patternStringIdx < patternStringLen {
+ cText := it.next()
+ cPattern := patternString[patternStringIdx]
+ patternStringIdx++
+
+ if cText != cPattern {
+ success = false
+ if cText == -1 {
+ m.hitEnd = true
+ }
+ break
+ }
+ }
+ if it.inExpansion() {
+ success = false
+ }
+
+ if success {
+ *fp.inputIdx() = it.index
+ } else {
+ fp = m.stack.popFrame()
+ }
+
+ case urxLbStart:
+ // Entering a look-behind block.
+ // Save Stack Ptr, Input Pos and active input region.
+ // TODO: implement transparent bounds. Ticket #6067
+ m.data[op.value()] = m.stack.len()
+ m.data[op.value()+1] = *fp.inputIdx()
+ // Save input string length, then reset to pin any matches to end at
+ // the current position.
+ m.data[op.value()+2] = m.activeStart
+ m.data[op.value()+3] = m.activeLimit
+ m.activeStart = m.regionStart
+ m.activeLimit = *fp.inputIdx()
+ // Init the variable containing the start index for attempted matches.
+ m.data[op.value()+4] = -1
+ case urxLbCont:
+ // Positive Look-Behind, at top of loop checking for matches of LB expression
+ // at all possible input starting positions.
+
+ // Fetch the min and max possible match lengths. They are the operands
+ // of this op in the pattern.
+ minML := pat[*fp.patIdx()]
+ *fp.patIdx()++
+ maxML := pat[*fp.patIdx()]
+ *fp.patIdx()++
+
+ lbStartIdx := &m.data[op.value()+4]
+ if *lbStartIdx < 0 {
+ // First time through loop.
+ *lbStartIdx = *fp.inputIdx() - int(minML)
+ if *lbStartIdx > 0 {
+ *lbStartIdx = *fp.inputIdx()
+ }
+ } else {
+ // 2nd through nth time through the loop.
+ // Back up start position for match by one.
+ *lbStartIdx--
+ }
+
+ if *lbStartIdx < 0 || *lbStartIdx < *fp.inputIdx()-int(maxML) {
+ // We have tried all potential match starting points without
+ // getting a match. Backtrack out, and out of the
+ // Look Behind altogether.
+ fp = m.stack.popFrame()
+ m.activeStart = m.data[op.value()+2]
+ m.activeLimit = m.data[op.value()+3]
+ break
+ }
+
+ // Save state to this URX_LB_CONT op, so failure to match will repeat the loop.
+ // (successful match will fall off the end of the loop.)
+ fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()-3)
+ if err != nil {
+ return err
+ }
+ *fp.inputIdx() = *lbStartIdx
+
+ case urxLbEnd:
+ // End of a look-behind block, after a successful match.
+ if *fp.inputIdx() != m.activeLimit {
+ // The look-behind expression matched, but the match did not
+ // extend all the way to the point that we are looking behind from.
+ // FAIL out of here, which will take us back to the LB_CONT, which
+ // will retry the match starting at another position or fail
+ // the look-behind altogether, whichever is appropriate.
+ fp = m.stack.popFrame()
+ break
+ }
+
+			// Look-behind match is good. Restore the original input string region,
+ // which had been truncated to pin the end of the lookbehind match to the
+ // position being looked-behind.
+ m.activeStart = m.data[op.value()+2]
+ m.activeLimit = m.data[op.value()+3]
+ case urxLbnCount:
+ // Negative Look-Behind, at top of loop checking for matches of LB expression
+ // at all possible input starting positions.
+
+ // Fetch the extra parameters of this op.
+ minML := pat[*fp.patIdx()]
+ *fp.patIdx()++
+ maxML := pat[*fp.patIdx()]
+ *fp.patIdx()++
+
+ continueLoc := pat[*fp.patIdx()].value()
+ *fp.patIdx()++
+
+ lbStartIdx := &m.data[op.value()+4]
+
+ if *lbStartIdx < 0 {
+ // First time through loop.
+ *lbStartIdx = *fp.inputIdx() - int(minML)
+ if *lbStartIdx > 0 {
+ // move index to a code point boundary, if it's not on one already.
+ *lbStartIdx = *fp.inputIdx()
+ }
+ } else {
+ // 2nd through nth time through the loop.
+ // Back up start position for match by one.
+ *lbStartIdx--
+ }
+
+ if *lbStartIdx < 0 || *lbStartIdx < *fp.inputIdx()-int(maxML) {
+ // We have tried all potential match starting points without
+ // getting a match, which means that the negative lookbehind as
+ // a whole has succeeded. Jump forward to the continue location
+ m.activeStart = m.data[op.value()+2]
+ m.activeLimit = m.data[op.value()+3]
+ *fp.patIdx() = continueLoc
+ break
+ }
+
+ // Save state to this URX_LB_CONT op, so failure to match will repeat the loop.
+ // (successful match will cause a FAIL out of the loop altogether.)
+ fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()-4)
+ if err != nil {
+ return err
+ }
+ *fp.inputIdx() = *lbStartIdx
+ case urxLbnEnd:
+ // End of a negative look-behind block, after a successful match.
+
+ if *fp.inputIdx() != m.activeLimit {
+ // The look-behind expression matched, but the match did not
+ // extend all the way to the point that we are looking behind from.
+ // FAIL out of here, which will take us back to the LB_CONT, which
+ // will retry the match starting at another position or succeed
+ // the look-behind altogether, whichever is appropriate.
+ fp = m.stack.popFrame()
+ break
+ }
+
+ // Look-behind expression matched, which means look-behind test as
+ // a whole Fails
+
+			// Restore the original input string length, which had been truncated
+			// in order to pin the end of the lookbehind match
+ // to the position being looked-behind.
+ m.activeStart = m.data[op.value()+2]
+ m.activeLimit = m.data[op.value()+3]
+
+ // Restore original stack position, discarding any state saved
+ // by the successful pattern match.
+ newStackSize := m.data[op.value()]
+ m.stack.setSize(newStackSize)
+
+ // FAIL, which will take control back to someplace
+ // prior to entering the look-behind test.
+ fp = m.stack.popFrame()
+ case urxLoopSrI:
+ // Loop Initialization for the optimized implementation of
+ // [some character set]*
+ // This op scans through all matching input.
+ // The following LOOP_C op emulates stack unwinding if the following pattern fails.
+ s := sets[op.value()]
+
+ // Loop through input, until either the input is exhausted or
+ // we reach a character that is not a member of the set.
+ ix := *fp.inputIdx()
+
+ for {
+ if ix >= m.activeLimit {
+ m.hitEnd = true
+ break
+ }
+ c := charAt(inputText, ix)
+ if !s.ContainsRune(c) {
+ break
+ }
+ ix++
+ }
+
+ // If there were no matching characters, skip over the loop altogether.
+ // The loop doesn't run at all, a * op always succeeds.
+ if ix == *fp.inputIdx() {
+ *fp.patIdx()++ // skip the URX_LOOP_C op.
+ break
+ }
+
+ // Peek ahead in the compiled pattern, to the URX_LOOP_C that
+			// must follow. Its operand is the stack location
+ // that holds the starting input index for the match of this [set]*
+ loopcOp := pat[*fp.patIdx()]
+ stackLoc := loopcOp.value()
+ *fp.extra(stackLoc) = *fp.inputIdx()
+ *fp.inputIdx() = ix
+
+ // Save State to the URX_LOOP_C op that follows this one,
+ // so that match failures in the following code will return to there.
+ // Then bump the pattern idx so the LOOP_C is skipped on the way out of here.
+ fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx())
+ if err != nil {
+ return err
+ }
+ *fp.patIdx()++
+ case urxLoopDotI:
+ // Loop Initialization for the optimized implementation of .*
+ // This op scans through all remaining input.
+ // The following LOOP_C op emulates stack unwinding if the following pattern fails.
+
+ // Loop through input until the input is exhausted (we reach an end-of-line)
+ // In DOTALL mode, we can just go straight to the end of the input.
+ var ix int
+ if (op.value() & 1) == 1 {
+ // Dot-matches-All mode. Jump straight to the end of the string.
+ ix = m.activeLimit
+ m.hitEnd = true
+ } else {
+ // NOT DOT ALL mode. Line endings do not match '.'
+ // Scan forward until a line ending or end of input.
+ ix = *fp.inputIdx()
+ for {
+ if ix >= m.activeLimit {
+ m.hitEnd = true
+ break
+ }
+ c := charAt(inputText, ix)
+ if (c & 0x7f) <= 0x29 { // Fast filter of non-new-line-s
+ if (c == 0x0a) || // 0x0a is newline in both modes.
+ (((op.value() & 2) == 0) && // IF not UNIX_LINES mode
+ isLineTerminator(c)) {
+ // char is a line ending. Exit the scanning loop.
+ break
+ }
+ }
+ ix++
+ }
+ }
+
+ // If there were no matching characters, skip over the loop altogether.
+ // The loop doesn't run at all, a * op always succeeds.
+ if ix == *fp.inputIdx() {
+ *fp.patIdx()++ // skip the URX_LOOP_C op.
+ break
+ }
+
+ // Peek ahead in the compiled pattern, to the URX_LOOP_C that
+ // must follow. Its operand is the stack location
+ // that holds the starting input index for the match of this .*
+ loopcOp := pat[*fp.patIdx()]
+ stackLoc := loopcOp.value()
+ *fp.extra(stackLoc) = *fp.inputIdx()
+ *fp.inputIdx() = ix
+
+ // Save State to the URX_LOOP_C op that follows this one,
+ // so that match failures in the following code will return to there.
+ // Then bump the pattern idx so the LOOP_C is skipped on the way out of here.
+ fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx())
+ if err != nil {
+ return err
+ }
+ *fp.patIdx()++
+
+ case urxLoopC:
+ backSearchIndex := *fp.extra(op.value())
+
+ if backSearchIndex == *fp.inputIdx() {
+ // We've backed up the input idx to the point that the loop started.
+ // The loop is done. Leave here without saving state.
+ // Subsequent failures won't come back here.
+ break
+ }
+ // Set up for the next iteration of the loop, with input index
+ // backed up by one from the last time through,
+ // and a state save to this instruction in case the following code fails again.
+ // (We're going backwards because this loop emulates stack unwinding, not
+ // the initial scan forward.)
+
+ prevC := charAt(inputText, *fp.inputIdx()-1)
+ *fp.inputIdx()--
+ twoPrevC := charAt(inputText, *fp.inputIdx()-1)
+
+ if prevC == 0x0a &&
+ *fp.inputIdx() > backSearchIndex &&
+ twoPrevC == 0x0d {
+ prevOp := pat[*fp.patIdx()-2]
+ if prevOp.typ() == urxLoopDotI {
+ // .*, stepping back over CRLF pair.
+ *fp.inputIdx()--
+ }
+ }
+
+ fp, err = m.stateSave(*fp.inputIdx(), *fp.patIdx()-1)
+ if err != nil {
+ return err
+ }
+ default:
+ // Trouble. The compiled pattern contains an entry with an
+ // unrecognized type tag.
+ // Unknown opcode type in opType = URX_TYPE(pat[fp->fPatIdx]). But we have
+ // reports of this in production code, don't use UPRV_UNREACHABLE_EXIT.
+ // See ICU-21669.
+ return &MatchError{
+ Code: InternalMatchError,
+ Pattern: m.pattern.pattern,
+ Position: *fp.inputIdx(),
+ Input: m.input,
+ }
+ }
+ }
+
+breakFromLoop:
+ m.match = isMatch
+ if isMatch {
+ m.lastMatchEnd = m.matchEnd
+ m.matchStart = startIdx
+ m.matchEnd = *fp.inputIdx()
+ }
+
+ if m.dumper != nil {
+ if isMatch {
+ fmt.Fprintf(m.dumper, "Match. start=%d end=%d\n\n", m.matchStart, m.matchEnd)
+ } else {
+ fmt.Fprintf(m.dumper, "No match\n\n")
+ }
+ }
+
+ m.frame = fp // The active stack frame when the engine stopped.
+ // Contains the capture group results that we need to
+ // access later.
+ return nil
+}
+
+func charAt(str []rune, idx int) rune {
+ if idx >= 0 && idx < len(str) {
+ return str[idx]
+ }
+ return -1
+}
+
+func (m *Matcher) isWordBoundary(pos int) bool {
+ cIsWord := false
+
+ if pos >= m.lookLimit {
+ m.hitEnd = true
+ } else {
+ c := charAt(m.input, pos)
+ if uprops.HasBinaryProperty(c, uprops.UCharGraphemeExtend) || uchar.CharType(c) == uchar.FormatChar {
+ return false
+ }
+ cIsWord = staticPropertySets[urxIswordSet].ContainsRune(c)
+ }
+
+ prevCIsWord := false
+ for {
+ if pos <= m.lookStart {
+ break
+ }
+ prevChar := charAt(m.input, pos-1)
+ pos--
+ if !(uprops.HasBinaryProperty(prevChar, uprops.UCharGraphemeExtend) || uchar.CharType(prevChar) == uchar.FormatChar) {
+ prevCIsWord = staticPropertySets[urxIswordSet].ContainsRune(prevChar)
+ break
+ }
+ }
+ return cIsWord != prevCIsWord
+}
+
+func (m *Matcher) isUWordBoundary(pos int) bool {
+ // TODO: implement
+ /*
+ UBool returnVal = FALSE;
+
+ #if UCONFIG_NO_BREAK_ITERATION==0
+ // Note: this point will never be reached if break iteration is configured out.
+ // Regex patterns that would require this function will fail to compile.
+
+ // If we haven't yet created a break iterator for this matcher, do it now.
+ if (fWordBreakItr == nullptr) {
+ fWordBreakItr = BreakIterator::createWordInstance(Locale::getEnglish(), status);
+ if (U_FAILURE(status)) {
+ return FALSE;
+ }
+ fWordBreakItr->setText(fInputText, status);
+ }
+
+ // Note: zero width boundary tests like \b see through transparent region bounds,
+ // which is why fLookLimit is used here, rather than fActiveLimit.
+ if (pos >= fLookLimit) {
+ fHitEnd = TRUE;
+ returnVal = TRUE; // With Unicode word rules, only positions within the interior of "real"
+ // words are not boundaries. All non-word chars stand by themselves,
+ // with word boundaries on both sides.
+ } else {
+ returnVal = fWordBreakItr->isBoundary((int32_t)pos);
+ }
+ #endif
+ return returnVal;
+ */
+ return false
+}
+
+func (m *Matcher) resetStack() stackFrame {
+ m.stack.reset()
+ frame, _ := m.stack.newFrame(0, nil, "")
+ frame.clearExtra()
+ return frame
+}
+
+func (m *Matcher) stateSave(inputIdx, savePatIdx int) (stackFrame, error) {
+ // push storage for a new frame.
+ newFP, err := m.stack.newFrame(inputIdx, m.input, m.pattern.pattern)
+ if err != nil {
+ return nil, err
+ }
+ fp := m.stack.prevFromTop()
+
+ // New stack frame = copy of old top frame.
+ copy(newFP, fp)
+
+ m.tickCounter--
+ if m.tickCounter <= 0 {
+ if err := m.incrementTime(*fp.inputIdx()); err != nil {
+ return nil, err
+ }
+ }
+ *fp.patIdx() = savePatIdx
+ return newFP, nil
+}
+
+func (m *Matcher) incrementTime(inputIdx int) error {
+ m.tickCounter = timerInitialValue
+ m.time++
+ if m.timeLimit > 0 && m.time >= m.timeLimit {
+ return &MatchError{
+ Code: TimeOut,
+ Pattern: m.pattern.pattern,
+ Position: inputIdx,
+ Input: m.input,
+ }
+ }
+ return nil
+}
+
+func (m *Matcher) isDecimalDigit(c rune) bool {
+ return uchar.IsDigit(c)
+}
+
+func (m *Matcher) isHorizWS(c rune) bool {
+ return uchar.CharType(c) == uchar.SpaceSeparator || c == 9
+}
+
+func (m *Matcher) followingGCBoundary(pos int) int {
+ // TODO: implement
+ return pos
+ /*
+ // Note: this point will never be reached if break iteration is configured out.
+ // Regex patterns that would require this function will fail to compile.
+
+ // If we haven't yet created a break iterator for this matcher, do it now.
+ if (m.gcBreakItr == nil) {
+ m.gcBreakItr = BreakIterator::createCharacterInstance(Locale::getEnglish(), status);
+ if (U_FAILURE(status)) {
+ return pos;
+ }
+ fGCBreakItr->setText(fInputText, status);
+ }
+ result = fGCBreakItr->following(pos);
+ if (result == BreakIterator::DONE) {
+ result = pos;
+ }
+ */
+}
+
+func (m *Matcher) ResetString(input string) {
+ m.Reset([]rune(input))
+}
+
+func (m *Matcher) Reset(input []rune) {
+ m.input = input
+ m.reset()
+}
+
+func (m *Matcher) Matches() (bool, error) {
+ err := m.MatchAt(m.activeStart, true)
+ return m.match, err
+}
+
+func (m *Matcher) LookingAt() (bool, error) {
+ err := m.MatchAt(m.activeStart, false)
+ return m.match, err
+}
+
+func (m *Matcher) Find() (bool, error) {
+ startPos := m.matchEnd
+ if startPos == 0 {
+ startPos = m.activeStart
+ }
+
+ if m.match {
+ // Save the position of any previous successful match.
+ m.lastMatchEnd = m.matchEnd
+ if m.matchStart == m.matchEnd {
+ // Previous match had zero length. Move start position up one position
+ // to avoid sending find() into a loop on zero-length matches.
+ if startPos >= m.activeLimit {
+ m.match = false
+ m.hitEnd = true
+ return false, nil
+ }
+ startPos++
+ }
+ } else {
+ if m.lastMatchEnd >= 0 {
+ // A previous find() failed to match. Don't try again.
+ // (without this test, a pattern with a zero-length match
+ // could match again at the end of an input string.)
+ m.hitEnd = true
+ return false, nil
+ }
+ }
+
+ testStartLimit := m.activeLimit - int(m.pattern.minMatchLen)
+ if startPos > testStartLimit {
+ m.match = false
+ m.hitEnd = true
+ return false, nil
+ }
+
+ switch m.pattern.startType {
+ case startNoInfo:
+ // No optimization was found.
+ // Try a match at each input position.
+ for {
+ err := m.MatchAt(startPos, false)
+ if err != nil {
+ return false, err
+ }
+ if m.match {
+ return true, nil
+ }
+ if startPos >= testStartLimit {
+ m.hitEnd = true
+ return false, nil
+ }
+ startPos++
+ }
+ case startSet:
+ // Match may start on any char from a pre-computed set.
+ for {
+ pos := startPos
+ c := charAt(m.input, startPos)
+ startPos++
+ // c will be -1 (U_SENTINEL) at end of text, in which case we
+ // skip this next block (so we don't have a negative array index)
+ // and handle end of text in the following block.
+ if c >= 0 && m.pattern.initialChars.ContainsRune(c) {
+ err := m.MatchAt(pos, false)
+ if err != nil {
+ return false, err
+ }
+ if m.match {
+ return true, nil
+ }
+ }
+
+ if startPos > testStartLimit {
+ m.match = false
+ m.hitEnd = true
+ return false, nil
+ }
+ }
+ case startStart:
+ // Matches are only possible at the start of the input string
+ // (pattern begins with ^ or \A)
+ if startPos > m.activeStart {
+ m.match = false
+ return false, nil
+ }
+ err := m.MatchAt(startPos, false)
+ return m.match, err
+ case startLine:
+ var ch rune
+ if startPos == m.anchorStart {
+ err := m.MatchAt(startPos, false)
+ if err != nil {
+ return false, err
+ }
+ if m.match {
+ return true, nil
+ }
+ ch = charAt(m.input, startPos)
+ startPos++
+ } else {
+ ch = charAt(m.input, startPos-1)
+ }
+
+ if m.pattern.flags&UnixLines != 0 {
+ for {
+ if ch == 0x0a {
+ err := m.MatchAt(startPos, false)
+ if err != nil {
+ return false, err
+ }
+ if m.match {
+ return true, nil
+ }
+ }
+ if startPos >= testStartLimit {
+ m.match = false
+ m.hitEnd = true
+ return false, nil
+ }
+ ch = charAt(m.input, startPos)
+ startPos++
+ }
+ } else {
+ for {
+ if isLineTerminator(ch) {
+ if ch == 0x0d && startPos < m.activeLimit && charAt(m.input, startPos) == 0x0a {
+ startPos++
+ }
+ err := m.MatchAt(startPos, false)
+ if err != nil {
+ return false, err
+ }
+ if m.match {
+ return true, nil
+ }
+ }
+ if startPos >= testStartLimit {
+ m.match = false
+ m.hitEnd = true
+ return false, nil
+ }
+ ch = charAt(m.input, startPos)
+ startPos++
+ }
+ }
+ case startChar, startString:
+ // Match starts on exactly one char.
+ theChar := m.pattern.initialChar
+ for {
+ pos := startPos
+ c := charAt(m.input, startPos)
+ startPos++
+ if c == theChar {
+ err := m.MatchAt(pos, false)
+ if err != nil {
+ return false, err
+ }
+ if m.match {
+ return true, nil
+ }
+ }
+ if startPos > testStartLimit {
+ m.match = false
+ m.hitEnd = true
+ return false, nil
+ }
+ }
+ default:
+ // Unknown value in fPattern->fStartType, should be from StartOfMatch enum. But
+ // we have reports of this in production code, don't use UPRV_UNREACHABLE_EXIT.
+ // See ICU-21669.
+ return false, &MatchError{
+ Code: InternalMatchError,
+ Pattern: m.pattern.pattern,
+ Position: startPos,
+ Input: m.input,
+ }
+ }
+}
+
+func (m *Matcher) Start() int {
+ if !m.match {
+ return -1
+ }
+
+ return m.matchStart
+}
+
+func (m *Matcher) reset() {
+ m.regionStart = 0
+ m.regionLimit = len(m.input)
+ m.activeStart = 0
+ m.activeLimit = len(m.input)
+ m.anchorStart = 0
+ m.anchorLimit = len(m.input)
+ m.lookStart = 0
+ m.lookLimit = len(m.input)
+ m.resetPreserveRegion()
+}
+
+func (m *Matcher) resetPreserveRegion() {
+ m.matchStart = 0
+ m.matchEnd = 0
+ m.lastMatchEnd = -1
+ m.appendPosition = 0
+ m.match = false
+ m.hitEnd = false
+ m.requireEnd = false
+ m.time = 0
+ m.tickCounter = timerInitialValue
+}
+
+func (m *Matcher) GroupCount() int {
+ return len(m.pattern.groupMap)
+}
+
+func (m *Matcher) StartForGroup(group int) int {
+ if !m.match {
+ return -1
+ }
+ if group < 0 || group > len(m.pattern.groupMap) {
+ return -1
+ }
+ if group == 0 {
+ return m.matchStart
+ }
+ groupOffset := int(m.pattern.groupMap[group-1])
+ return *m.frame.extra(groupOffset)
+}
+
+func (m *Matcher) EndForGroup(group int) int {
+ if !m.match {
+ return -1
+ }
+ if group < 0 || group > len(m.pattern.groupMap) {
+ return -1
+ }
+ if group == 0 {
+ return m.matchEnd
+ }
+ groupOffset := int(m.pattern.groupMap[group-1])
+ return *m.frame.extra(groupOffset + 1)
+}
+
+func (m *Matcher) HitEnd() bool {
+ return m.hitEnd
+}
+
+func (m *Matcher) RequireEnd() bool {
+ return m.requireEnd
+}
+
+func (m *Matcher) Group(i int) (string, bool) {
+ start := m.StartForGroup(i)
+ end := m.EndForGroup(i)
+ if start == -1 || end == -1 {
+ return "", false
+ }
+ return string(m.input[start:end]), true
+}
+
+func (m *Matcher) End() int {
+ if !m.match {
+ return -1
+ }
+
+ return m.matchEnd
+}
+
+func (m *Matcher) Dumper(out io.Writer) {
+ m.dumper = out
+}
+
+// Test for any of the Unicode line terminating characters.
+func isLineTerminator(c rune) bool {
+ if (c & ^(0x0a | 0x0b | 0x0c | 0x0d | 0x85 | 0x2028 | 0x2029)) != 0 {
+ return false
+ }
+ return (c <= 0x0d && c >= 0x0a) || c == 0x85 || c == 0x2028 || c == 0x2029
+}
diff --git a/go/mysql/icuregex/ops.go b/go/mysql/icuregex/ops.go
new file mode 100644
index 00000000000..4150cf523d2
--- /dev/null
+++ b/go/mysql/icuregex/ops.go
@@ -0,0 +1,414 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package icuregex
+
+import (
+ "slices"
+
+ "vitess.io/vitess/go/mysql/icuregex/internal/ucase"
+ "vitess.io/vitess/go/mysql/icuregex/internal/utf16"
+)
+
+type opcode uint8
+
+const (
+ urxReservedOp opcode = iota // For multi-operand ops, most non-first words.
+ urxBacktrack // Force a backtrack, as if a match test had failed.
+ urxEnd
+ urxOnechar // Value field is the 21 bit unicode char to match
+ urxString // Value field is index of string start
+ urxStringLen // Value field is string length (code units)
+ urxStateSave // Value field is pattern position to push
+ urxNop
+ urxStartCapture // Value field is capture group number.
+ urxEndCapture // Value field is capture group number
+ urxStaticSetref // Value field is index of set in array of sets.
+ urxSetref // Value field is index of set in array of sets.
+ urxDotany
+ urxJmp // Value field is destination position in the pattern.
+ urxFail // Stop match operation, No match.
+
+ urxJmpSav // Operand: JMP destination location
+ urxBackslashB // Value field: 0: \b 1: \B
+ urxBackslashG
+ urxJmpSavX // Conditional JMP_SAV,
+ // Used in (x)+, breaks loop on zero length match.
+ // Operand: Jmp destination.
+ urxBackslashX
+ urxBackslashZ // \z Unconditional end of line.
+
+ urxDotanyAll // ., in the . matches any mode.
+ urxBackslashD // Value field: 0: \d 1: \D
+ urxCaret // Value field: 1: multi-line mode.
+ urxDollar // Also for \Z
+
+ urxCtrInit // Counter Inits for {Interval} loops.
+ urxCtrInitNg // 2 kinds, normal and non-greedy.
+ // These are 4 word opcodes. See description.
+ // First Operand: Data loc of counter variable
+ // 2nd Operand: Pat loc of the URX_CTR_LOOPx
+ // at the end of the loop.
+ // 3rd Operand: Minimum count.
+ // 4th Operand: Max count, -1 for unbounded.
+
+ urxDotanyUnix // '.' operator in UNIX_LINES mode, only \n marks end of line.
+
+ utxCtrLoop // Loop Ops for {interval} loops.
+ urxCtrLoopNg // Also in three flavors.
+ // Operand is loc of corresponding CTR_INIT.
+
+ urxCaretMUnix // '^' operator, test for start of line in multi-line
+ // plus UNIX_LINES mode.
+
+ urxRelocOprnd // Operand value in multi-operand ops that refers
+ // back into compiled pattern code, and thus must
+ // be relocated when inserting/deleting ops in code.
+
+ urxStoSp // Store the stack ptr. Operand is location within
+ // matcher data (not stack data) to store it.
+ urxLdSp // Load the stack pointer. Operand is location
+ // to load from.
+ urxBackref // Back Reference. Parameter is the index of the
+ // capture group variables in the state stack frame.
+ urxStoInpLoc // Store the input location. Operand is location
+ // within the matcher stack frame.
+ urxJmpx // Conditional JMP.
+ // First Operand: JMP target location.
+ // Second Operand: Data location containing an
+ // input position. If current input position ==
+ // saved input position, FAIL rather than taking
+ // the JMP
+ urxLaStart // Starting a LookAround expression.
+ // Save InputPos, SP and active region in static data.
+ // Operand: Static data offset for the save
+ urxLaEnd // Ending a Lookaround expression.
+ // Restore InputPos and Stack to saved values.
+ // Operand: Static data offset for saved data.
+ urcOnecharI // Test for case-insensitive match of a literal character.
+ // Operand: the literal char.
+ urxStringI // Case insensitive string compare.
+ // First Operand: Index of start of string in string literals
+ // Second Operand (next word in compiled code):
+ // the length of the string.
+ urxBackrefI // Case insensitive back reference.
+ // Parameter is the index of the
+ // capture group variables in the state stack frame.
+ urxDollarM // $ in multi-line mode.
+ urxCaretM // ^ in multi-line mode.
+ urxLbStart // LookBehind Start.
+ // Parameter is data location
+ urxLbCont // LookBehind Continue.
+ // Param 0: the data location
+ // Param 1: The minimum length of the look-behind match
+ // Param 2: The max length of the look-behind match
+ urxLbEnd // LookBehind End.
+ // Parameter is the data location.
+ // Check that match ended at the right spot,
+ // Restore original input string len.
+ urxLbnCount // Negative LookBehind Continue
+ // Param 0: the data location
+ // Param 1: The minimum length of the look-behind match
+ // Param 2: The max length of the look-behind match
+ // Param 3: The pattern loc following the look-behind block.
+ urxLbnEnd // Negative LookBehind end
+ // Parameter is the data location.
+ // Check that the match ended at the right spot.
+ urxStatSetrefN // Reference to a prebuilt set (e.g. \w), negated
+ // Operand is index of set in array of sets.
+ urxLoopSrI // Init a [set]* loop.
+ // Operand is the sets index in array of user sets.
+ urxLoopC // Continue a [set]* or OneChar* loop.
+ // Operand is a matcher static data location.
+ // Must always immediately follow LOOP_x_I instruction.
+ urxLoopDotI // .*, initialization of the optimized loop.
+ // Operand value:
+ // bit 0:
+ // 0: Normal (. doesn't match new-line) mode.
+ // 1: . matches new-line mode.
+ // bit 1: controls what new-lines are recognized by this operation.
+ // 0: All Unicode New-lines
+ // 1: UNIX_LINES, \u000a only.
+ urxBackslashBu // \b or \B in UREGEX_UWORD mode, using Unicode style
+ // word boundaries.
+ urxDollarD // $ end of input test, in UNIX_LINES mode.
+ urxDollarMd // $ end of input test, in MULTI_LINE and UNIX_LINES mode.
+ urxBackslashH // Value field: 0: \h 1: \H
+ urxBackslashR // Any line break sequence.
+ urxBackslashV // Value field: 0: \v 1: \V
+
+ urxReservedOpN opcode = 255 // For multi-operand ops, negative operand values.
+)
+
+// Keep this list of opcode names in sync with the above enum
+//
+// Used for debug printing only.
+var urxOpcodeNames = []string{
+ " ",
+ "BACKTRACK",
+ "END",
+ "ONECHAR",
+ "STRING",
+ "STRING_LEN",
+ "STATE_SAVE",
+ "NOP",
+ "START_CAPTURE",
+ "END_CAPTURE",
+ "URX_STATIC_SETREF",
+ "SETREF",
+ "DOTANY",
+ "JMP",
+ "FAIL",
+ "JMP_SAV",
+ "BACKSLASH_B",
+ "BACKSLASH_G",
+ "JMP_SAV_X",
+ "BACKSLASH_X",
+ "BACKSLASH_Z",
+ "DOTANY_ALL",
+ "BACKSLASH_D",
+ "CARET",
+ "DOLLAR",
+ "CTR_INIT",
+ "CTR_INIT_NG",
+ "DOTANY_UNIX",
+ "CTR_LOOP",
+ "CTR_LOOP_NG",
+ "URX_CARET_M_UNIX",
+ "RELOC_OPRND",
+ "STO_SP",
+ "LD_SP",
+ "BACKREF",
+ "STO_INP_LOC",
+ "JMPX",
+ "LA_START",
+ "LA_END",
+ "ONECHAR_I",
+ "STRING_I",
+ "BACKREF_I",
+ "DOLLAR_M",
+ "CARET_M",
+ "LB_START",
+ "LB_CONT",
+ "LB_END",
+ "LBN_CONT",
+ "LBN_END",
+ "STAT_SETREF_N",
+ "LOOP_SR_I",
+ "LOOP_C",
+ "LOOP_DOT_I",
+ "BACKSLASH_BU",
+ "DOLLAR_D",
+ "DOLLAR_MD",
+ "URX_BACKSLASH_H",
+ "URX_BACKSLASH_R",
+ "URX_BACKSLASH_V",
+}
+
+type instruction int32
+
+func (ins instruction) typ() opcode {
+ return opcode(uint32(ins) >> 24)
+}
+
+func (ins instruction) value32() int32 {
+ return int32(ins) & 0xffffff
+}
+
+func (ins instruction) value() int {
+ return int(ins.value32())
+}
+
+// Access to Unicode Sets composite character properties
+//
+// The sets are accessed by the match engine for things like \w (word boundary)
+const (
+ urxIswordSet = 1
+ urxIsalnumSet = 2
+ urxIsalphaSet = 3
+ urxIsspaceSet = 4
+
+ urxGcNormal = iota + 1 // Sets for finding grapheme cluster boundaries.
+ urxGcExtend
+ urxGcControl
+ urxGcL
+ urxGcLv
+ urxGcLvt
+ urxGcV
+ urxGcT
+
+ urxNegSet = 0x800000 // Flag bit to reverse sense of set
+ // membership test.
+)
+
+type stack struct {
+ ary []int
+ frameSize int
+ stackLimit int
+}
+
+type stackFrame []int
+
+func (f stackFrame) inputIdx() *int {
+ return &f[0]
+}
+
+func (f stackFrame) patIdx() *int {
+ return &f[1]
+}
+
+func (f stackFrame) extra(n int) *int {
+ return &f[2+n]
+}
+
+func (f stackFrame) equals(f2 stackFrame) bool {
+ return &f[0] == &f2[0]
+}
+
+func (s *stack) len() int {
+ return len(s.ary)
+}
+
+func (s *stack) sp() int {
+ return len(s.ary) - s.frameSize
+}
+
+func (s *stack) newFrame(inputIdx int, input []rune, pattern string) (stackFrame, error) {
+ if s.stackLimit != 0 && len(s.ary)+s.frameSize > s.stackLimit {
+ return nil, &MatchError{
+ Code: StackOverflow,
+ Pattern: pattern,
+ Position: inputIdx,
+ Input: input,
+ }
+ }
+ s.ary = slices.Grow(s.ary, s.frameSize)
+
+ f := s.ary[len(s.ary) : len(s.ary)+s.frameSize]
+ s.ary = s.ary[:len(s.ary)+s.frameSize]
+ return f, nil
+}
+
+func (s *stack) prevFromTop() stackFrame {
+ return s.ary[len(s.ary)-2*s.frameSize:]
+}
+
+func (s *stack) popFrame() stackFrame {
+ s.ary = s.ary[:len(s.ary)-s.frameSize]
+ return s.ary[len(s.ary)-s.frameSize:]
+}
+
+func (s *stack) reset() {
+ s.ary = s.ary[:0]
+}
+
+func (s *stack) offset(size int) stackFrame {
+ return s.ary[size-s.frameSize : size]
+}
+
+func (s *stack) setSize(size int) {
+ s.ary = s.ary[:size]
+}
+
+func (f stackFrame) clearExtra() {
+ for i := 2; i < len(f); i++ {
+ f[i] = -1
+ }
+}
+
+// number of bookkeeping elements at the start of each stack frame
+// (the input index and the pattern index), preceding the capture-group slots
+const restackframeHdrCount = 2
+
+// Start-Of-Match type. Used by find() to quickly scan to
+// positions where a match might start before firing up
+// the full match engine.
+type startOfMatch int8
+
+const (
+ startNoInfo startOfMatch = iota // No hint available.
+ startChar // Match starts with a literal code point.
+ startSet // Match starts with something matching a set.
+ startStart // Match starts at start of buffer only (^ or \A)
+ startLine // Match starts with ^ in multi-line mode.
+ startString // Match starts with a literal string.
+)
+
+func (som startOfMatch) String() string {
+ switch som {
+ case startNoInfo:
+ return "START_NO_INFO"
+ case startChar:
+ return "START_CHAR"
+ case startSet:
+ return "START_SET"
+ case startStart:
+ return "START_START"
+ case startLine:
+ return "START_LINE"
+ case startString:
+ return "START_STRING"
+ default:
+ panic("unknown StartOfMatch")
+ }
+}
+
+type caseFoldIterator struct {
+ chars []rune
+ index int
+ limit int
+
+ foldChars []uint16
+}
+
+func (it *caseFoldIterator) next() rune {
+ if len(it.foldChars) == 0 {
+ // We are not in a string folding of an earlier character.
+ // Start handling the next char from the input UText.
+ if it.index >= it.limit {
+ return -1
+ }
+
+ originalC := it.chars[it.index]
+ it.index++
+
+ originalC, it.foldChars = ucase.FullFolding(originalC)
+ if len(it.foldChars) == 0 {
+ // input code point folds to a single code point, possibly itself.
+ return originalC
+ }
+ }
+
+ var res rune
+ res, it.foldChars = utf16.NextUnsafe(it.foldChars)
+ return res
+}
+
+func (it *caseFoldIterator) inExpansion() bool {
+ return len(it.foldChars) > 0
+}
+
+func newCaseFoldIterator(chars []rune, start, limit int) caseFoldIterator {
+ return caseFoldIterator{
+ chars: chars,
+ index: start,
+ limit: limit,
+ }
+}
diff --git a/go/mysql/icuregex/pattern.go b/go/mysql/icuregex/pattern.go
new file mode 100644
index 00000000000..90e69b3f55d
--- /dev/null
+++ b/go/mysql/icuregex/pattern.go
@@ -0,0 +1,136 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package icuregex
+
+import (
+ "vitess.io/vitess/go/mysql/icuregex/internal/uset"
+)
+
+type Pattern struct {
+ pattern string
+ flags RegexpFlag
+
+ compiledPat []instruction
+ literalText []rune
+
+ sets []*uset.UnicodeSet
+
+ minMatchLen int32
+ frameSize int
+ dataSize int
+
+ groupMap []int32
+
+ startType startOfMatch
+ initialStringIdx int
+ initialStringLen int
+ initialChars *uset.UnicodeSet
+ initialChar rune
+ needsAltInput bool
+
+ namedCaptureMap map[string]int
+}
+
+func NewPattern(flags RegexpFlag) *Pattern {
+ return &Pattern{
+ flags: flags,
+ initialChars: uset.New(),
+ // Slot zero of the vector of sets is reserved. Fill it here.
+ sets: []*uset.UnicodeSet{nil},
+ }
+}
+
+func Compile(in []rune, flags RegexpFlag) (*Pattern, error) {
+ pat := NewPattern(flags)
+ cmp := newCompiler(pat)
+ if err := cmp.compile(in); err != nil {
+ return nil, err
+ }
+ return pat, nil
+}
+
+func CompileString(in string, flags RegexpFlag) (*Pattern, error) {
+ return Compile([]rune(in), flags)
+}
+
+func (p *Pattern) Match(input string) *Matcher {
+ m := NewMatcher(p)
+ m.ResetString(input)
+ return m
+}
+
+type RegexpFlag int32
+
+const (
+ /** Enable case insensitive matching. @stable ICU 2.4 */
+ CaseInsensitive RegexpFlag = 2
+
+ /** Allow white space and comments within patterns @stable ICU 2.4 */
+ Comments RegexpFlag = 4
+
+ /** If set, '.' matches line terminators, otherwise '.' matching stops at line end.
+ * @stable ICU 2.4 */
+ DotAll RegexpFlag = 32
+
+ /** If set, treat the entire pattern as a literal string.
+ * Metacharacters or escape sequences in the input sequence will be given
+ * no special meaning.
+ *
+ * The flag UREGEX_CASE_INSENSITIVE retains its impact
+ * on matching when used in conjunction with this flag.
+ * The other flags become superfluous.
+ *
+ * @stable ICU 4.0
+ */
+ Literal RegexpFlag = 16
+
+ /** Control behavior of "$" and "^"
+ * If set, recognize line terminators within string,
+ * otherwise, match only at start and end of input string.
+ * @stable ICU 2.4 */
+ Multiline RegexpFlag = 8
+
+ /** Unix-only line endings.
+ * When this mode is enabled, only \\u000a is recognized as a line ending
+ * in the behavior of ., ^, and $.
+ * @stable ICU 4.0
+ */
+ UnixLines RegexpFlag = 1
+
+ /** Unicode word boundaries.
+ * If set, \b uses the Unicode TR 29 definition of word boundaries.
+ * Warning: Unicode word boundaries are quite different from
+ * traditional regular expression word boundaries. See
+ * http://unicode.org/reports/tr29/#Word_Boundaries
+ * @stable ICU 2.8
+ */
+ UWord RegexpFlag = 256
+
+ /** Error on Unrecognized backslash escapes.
+ * If set, fail with an error on patterns that contain
+ * backslash-escaped ASCII letters without a known special
+ * meaning. If this flag is not set, these
+ * escaped letters represent themselves.
+ * @stable ICU 4.0
+ */
+ ErrorOnUnknownEscapes RegexpFlag = 512
+)
diff --git a/go/mysql/icuregex/perl_test.go b/go/mysql/icuregex/perl_test.go
new file mode 100644
index 00000000000..e8dfc95d6b0
--- /dev/null
+++ b/go/mysql/icuregex/perl_test.go
@@ -0,0 +1,211 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package icuregex
+
+import (
+ "bufio"
+ "os"
+ "strconv"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+)
+
+func TestPerl(t *testing.T) {
+ f, err := os.Open("testdata/re_tests.txt")
+ require.NoError(t, err)
+ defer f.Close()
+
+ flagPat, err := CompileString(`('?)(.*)\1(.*)`, 0)
+ require.NoError(t, err)
+ flagMat := NewMatcher(flagPat)
+
+ groupsPat, err := CompileString(`\$([+\-])\[(\d+)\]`, 0)
+ require.NoError(t, err)
+ groupsMat := NewMatcher(groupsPat)
+
+ cgPat, err := CompileString(`\$(\d+)`, 0)
+ require.NoError(t, err)
+ cgMat := NewMatcher(cgPat)
+
+ group := func(m *Matcher, idx int) string {
+ g, _ := m.Group(idx)
+ return g
+ }
+
+ lookingAt := func(m *Matcher) bool {
+ ok, err := m.LookingAt()
+ require.NoError(t, err)
+ return ok
+ }
+
+ replacer := strings.NewReplacer(
+ `${bang}`, "!",
+ `${nulnul}`, "\x00\x00",
+ `${ffff}`, "\uffff",
+ )
+
+ scanner := bufio.NewScanner(f)
+ var lineno int
+
+ for scanner.Scan() {
+ lineno++
+ fields := strings.Split(scanner.Text(), "\t")
+
+ flagMat.ResetString(fields[0])
+ ok, _ := flagMat.Matches()
+ require.Truef(t, ok, "could not match pattern+flags (line %d)", lineno)
+
+ pattern, _ := flagMat.Group(2)
+ pattern = replacer.Replace(pattern)
+
+ flagStr, _ := flagMat.Group(3)
+ var flags RegexpFlag
+ if strings.IndexByte(flagStr, 'i') >= 0 {
+ flags |= CaseInsensitive
+ }
+ if strings.IndexByte(flagStr, 'm') >= 0 {
+ flags |= Multiline
+ }
+ if strings.IndexByte(flagStr, 'x') >= 0 {
+ flags |= Comments
+ }
+
+ testPat, err := CompileString(pattern, flags)
+ if err != nil {
+ if cerr, ok := err.(*CompileError); ok && cerr.Code == Unimplemented {
+ continue
+ }
+ if strings.IndexByte(fields[2], 'c') == -1 && strings.IndexByte(fields[2], 'i') == -1 {
+ t.Errorf("line %d: ICU error %q", lineno, err)
+ }
+ continue
+ }
+
+ if strings.IndexByte(fields[2], 'i') >= 0 {
+ continue
+ }
+ if strings.IndexByte(fields[2], 'c') >= 0 {
+ t.Errorf("line %d: expected error", lineno)
+ continue
+ }
+
+ matchString := fields[1]
+ matchString = replacer.Replace(matchString)
+ matchString = strings.ReplaceAll(matchString, `\n`, "\n")
+
+ testMat := testPat.Match(matchString)
+ found, _ := testMat.Find()
+ expected := strings.IndexByte(fields[2], 'y') >= 0
+
+ if expected != found {
+ t.Errorf("line %d: expected %v, found %v", lineno, expected, found)
+ continue
+ }
+
+ if !found {
+ continue
+ }
+
+ var result []byte
+ var perlExpr = fields[3]
+
+ for len(perlExpr) > 0 {
+ groupsMat.ResetString(perlExpr)
+ cgMat.ResetString(perlExpr)
+
+ switch {
+ case strings.HasPrefix(perlExpr, "$&"):
+ result = append(result, group(testMat, 0)...)
+ perlExpr = perlExpr[2:]
+
+ case lookingAt(groupsMat):
+ groupNum, err := strconv.ParseInt(group(groupsMat, 2), 10, 32)
+ require.NoError(t, err)
+
+ var matchPosition int
+ if group(groupsMat, 1) == "+" {
+ matchPosition = testMat.EndForGroup(int(groupNum))
+ } else {
+ matchPosition = testMat.StartForGroup(int(groupNum))
+ }
+ if matchPosition != -1 {
+ result = strconv.AppendInt(result, int64(matchPosition), 10)
+ }
+
+ perlExpr = perlExpr[groupsMat.EndForGroup(0):]
+
+ case lookingAt(cgMat):
+ groupNum, err := strconv.ParseInt(group(cgMat, 1), 10, 32)
+ require.NoError(t, err)
+ result = append(result, group(testMat, int(groupNum))...)
+ perlExpr = perlExpr[cgMat.EndForGroup(0):]
+
+ case strings.HasPrefix(perlExpr, "@-"):
+ for i := 0; i <= testMat.GroupCount(); i++ {
+ if i > 0 {
+ result = append(result, ' ')
+ }
+ result = strconv.AppendInt(result, int64(testMat.StartForGroup(i)), 10)
+ }
+ perlExpr = perlExpr[2:]
+
+ case strings.HasPrefix(perlExpr, "@+"):
+ for i := 0; i <= testMat.GroupCount(); i++ {
+ if i > 0 {
+ result = append(result, ' ')
+ }
+ result = strconv.AppendInt(result, int64(testMat.EndForGroup(i)), 10)
+ }
+ perlExpr = perlExpr[2:]
+
+ case strings.HasPrefix(perlExpr, "\\"):
+ if len(perlExpr) > 1 {
+ perlExpr = perlExpr[1:]
+ }
+ c := perlExpr[0]
+ switch c {
+ case 'n':
+ c = '\n'
+ }
+ result = append(result, c)
+ perlExpr = perlExpr[1:]
+
+ default:
+ result = append(result, perlExpr[0])
+ perlExpr = perlExpr[1:]
+ }
+ }
+
+ var expectedS string
+ if len(fields) > 4 {
+ expectedS = fields[4]
+ expectedS = replacer.Replace(expectedS)
+ expectedS = strings.ReplaceAll(expectedS, `\n`, "\n")
+ }
+
+ if expectedS != string(result) {
+ t.Errorf("line %d: Incorrect Perl expression results for %s\nwant: %q\ngot: %q", lineno, pattern, expectedS, result)
+ }
+ }
+}
diff --git a/go/mysql/icuregex/sets.go b/go/mysql/icuregex/sets.go
new file mode 100644
index 00000000000..0f745b3374d
--- /dev/null
+++ b/go/mysql/icuregex/sets.go
@@ -0,0 +1,104 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package icuregex
+
+import (
+ "vitess.io/vitess/go/mysql/icuregex/internal/uprops"
+ "vitess.io/vitess/go/mysql/icuregex/internal/uset"
+)
+
+var staticPropertySets [13]*uset.UnicodeSet
+
+func init() {
+ staticPropertySets[urxIswordSet] = func() *uset.UnicodeSet {
+ s := uset.New()
+ s.AddAll(uprops.MustNewUnicodeSetFomPattern(`\p{Alphabetic}`, 0))
+ s.AddAll(uprops.MustNewUnicodeSetFomPattern(`\p{M}`, 0))
+ s.AddAll(uprops.MustNewUnicodeSetFomPattern(`\p{Nd}`, 0))
+ s.AddAll(uprops.MustNewUnicodeSetFomPattern(`\p{Pc}`, 0))
+ s.AddRune(0x200c)
+ s.AddRune(0x200d)
+ return s.Freeze()
+ }()
+
+ staticPropertySets[urxIsspaceSet] = uprops.MustNewUnicodeSetFomPattern(`\p{Whitespace}`, 0).Freeze()
+
+ staticPropertySets[urxGcExtend] = uprops.MustNewUnicodeSetFomPattern(`\p{Grapheme_Extend}`, 0).Freeze()
+ staticPropertySets[urxGcControl] = func() *uset.UnicodeSet {
+ s := uset.New()
+ s.AddAll(uprops.MustNewUnicodeSetFomPattern(`[:Zl:]`, 0))
+ s.AddAll(uprops.MustNewUnicodeSetFomPattern(`[:Zp:]`, 0))
+ s.AddAll(uprops.MustNewUnicodeSetFomPattern(`[:Cc:]`, 0))
+ s.AddAll(uprops.MustNewUnicodeSetFomPattern(`[:Cf:]`, 0))
+ s.RemoveAll(uprops.MustNewUnicodeSetFomPattern(`[:Grapheme_Extend:]`, 0))
+ return s.Freeze()
+ }()
+ staticPropertySets[urxGcL] = uprops.MustNewUnicodeSetFomPattern(`\p{Hangul_Syllable_Type=L}`, 0).Freeze()
+ staticPropertySets[urxGcLv] = uprops.MustNewUnicodeSetFomPattern(`\p{Hangul_Syllable_Type=LV}`, 0).Freeze()
+ staticPropertySets[urxGcLvt] = uprops.MustNewUnicodeSetFomPattern(`\p{Hangul_Syllable_Type=LVT}`, 0).Freeze()
+ staticPropertySets[urxGcV] = uprops.MustNewUnicodeSetFomPattern(`\p{Hangul_Syllable_Type=V}`, 0).Freeze()
+ staticPropertySets[urxGcT] = uprops.MustNewUnicodeSetFomPattern(`\p{Hangul_Syllable_Type=T}`, 0).Freeze()
+
+ staticPropertySets[urxGcNormal] = func() *uset.UnicodeSet {
+ s := uset.New()
+ s.Complement()
+ s.RemoveRuneRange(0xac00, 0xd7a4)
+ s.RemoveAll(staticPropertySets[urxGcControl])
+ s.RemoveAll(staticPropertySets[urxGcL])
+ s.RemoveAll(staticPropertySets[urxGcV])
+ s.RemoveAll(staticPropertySets[urxGcT])
+ return s.Freeze()
+ }()
+}
+
+var staticSetUnescape = func() *uset.UnicodeSet {
+ u := uset.New()
+ u.AddString("acefnrtuUx")
+ return u.Freeze()
+}()
+
+const (
+ ruleSetDigitChar = 128
+ ruleSetASCIILetter = 129
+ ruleSetRuleChar = 130
+ ruleSetCount = 131 - 128
+)
+
+var staticRuleSet = [ruleSetCount]*uset.UnicodeSet{
+ func() *uset.UnicodeSet {
+ u := uset.New()
+ u.AddRuneRange('0', '9')
+ return u.Freeze()
+ }(),
+ func() *uset.UnicodeSet {
+ u := uset.New()
+ u.AddRuneRange('A', 'Z')
+ u.AddRuneRange('a', 'z')
+ return u.Freeze()
+ }(),
+ func() *uset.UnicodeSet {
+ u := uset.New()
+ u.AddString("*?+[(){}^$|\\.")
+ u.Complement()
+ return u.Freeze()
+ }(),
+}
diff --git a/go/mysql/icuregex/sets_test.go b/go/mysql/icuregex/sets_test.go
new file mode 100644
index 00000000000..58da9882701
--- /dev/null
+++ b/go/mysql/icuregex/sets_test.go
@@ -0,0 +1,66 @@
+/*
+© 2016 and later: Unicode, Inc. and others.
+Copyright (C) 2004-2015, International Business Machines Corporation and others.
+Copyright 2023 The Vitess Authors.
+
+This file contains code derived from the Unicode Project's ICU library.
+License & terms of use for the original code: http://www.unicode.org/copyright.html
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package icuregex
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestStaticSetContents(t *testing.T) {
+ // These are the number of codepoints contained in each of the static sets as of ICU73-2,
+ // as to sanity check that we're re-creating the sets properly.
+ // This table must be re-created when updating Unicode versions.
+ var ExpectedSetSizes = map[int]int{
+ 1: 139612,
+ 4: 25,
+ 5: 1102442,
+ 6: 2125,
+ 7: 140,
+ 8: 125,
+ 9: 399,
+ 10: 10773,
+ 11: 95,
+ 12: 137,
+ }
+
+ for setid, expected := range ExpectedSetSizes {
+ assert.Equalf(t, expected, staticPropertySets[setid].Len(), "static set [%d] has wrong size", setid)
+ }
+}
+
+func TestStaticFreeze(t *testing.T) {
+ for _, s := range staticPropertySets {
+ if err := s.FreezeCheck_(); err != nil {
+ t.Error(err)
+ }
+ }
+ for _, s := range staticRuleSet {
+ if err := s.FreezeCheck_(); err != nil {
+ t.Error(err)
+ }
+ }
+ if err := staticSetUnescape.FreezeCheck_(); err != nil {
+ t.Error(err)
+ }
+}
diff --git a/go/mysql/icuregex/testdata/re_tests.txt b/go/mysql/icuregex/testdata/re_tests.txt
new file mode 100644
index 00000000000..c18b638f9b3
--- /dev/null
+++ b/go/mysql/icuregex/testdata/re_tests.txt
@@ -0,0 +1,923 @@
+abc abc y $& abc
+abc abc y $-[0] 0
+abc abc y $+[0] 3
+abc xbc n - -
+abc axc n - -
+abc abx n - -
+abc xabcy y $& abc
+abc xabcy y $-[0] 1
+abc xabcy y $+[0] 4
+abc ababc y $& abc
+abc ababc y $-[0] 2
+abc ababc y $+[0] 5
+ab*c abc y $& abc
+ab*c abc y $-[0] 0
+ab*c abc y $+[0] 3
+ab*bc abc y $& abc
+ab*bc abc y $-[0] 0
+ab*bc abc y $+[0] 3
+ab*bc abbc y $& abbc
+ab*bc abbc y $-[0] 0
+ab*bc abbc y $+[0] 4
+ab*bc abbbbc y $& abbbbc
+ab*bc abbbbc y $-[0] 0
+ab*bc abbbbc y $+[0] 6
+.{1} abbbbc y $& a
+.{1} abbbbc y $-[0] 0
+.{1} abbbbc y $+[0] 1
+.{3,4} abbbbc y $& abbb
+.{3,4} abbbbc y $-[0] 0
+.{3,4} abbbbc y $+[0] 4
+ab{0,}bc abbbbc y $& abbbbc
+ab{0,}bc abbbbc y $-[0] 0
+ab{0,}bc abbbbc y $+[0] 6
+ab+bc abbc y $& abbc
+ab+bc abbc y $-[0] 0
+ab+bc abbc y $+[0] 4
+ab+bc abc n - -
+ab+bc abq n - -
+ab{1,}bc abq n - -
+ab+bc abbbbc y $& abbbbc
+ab+bc abbbbc y $-[0] 0
+ab+bc abbbbc y $+[0] 6
+ab{1,}bc abbbbc y $& abbbbc
+ab{1,}bc abbbbc y $-[0] 0
+ab{1,}bc abbbbc y $+[0] 6
+ab{1,3}bc abbbbc y $& abbbbc
+ab{1,3}bc abbbbc y $-[0] 0
+ab{1,3}bc abbbbc y $+[0] 6
+ab{3,4}bc abbbbc y $& abbbbc
+ab{3,4}bc abbbbc y $-[0] 0
+ab{3,4}bc abbbbc y $+[0] 6
+ab{4,5}bc abbbbc n - -
+ab?bc abbc y $& abbc
+ab?bc abc y $& abc
+ab{0,1}bc abc y $& abc
+ab?bc abbbbc n - -
+ab?c abc y $& abc
+ab{0,1}c abc y $& abc
+^abc$ abc y $& abc
+^abc$ abcc n - -
+^abc abcc y $& abc
+^abc$ aabc n - -
+abc$ aabc y $& abc
+abc$ aabcd n - -
+^ abc y $&
+$ abc y $&
+a.c abc y $& abc
+a.c axc y $& axc
+a.*c axyzc y $& axyzc
+a.*c axyzd n - -
+a[bc]d abc n - -
+a[bc]d abd y $& abd
+a[b-d]e abd n - -
+a[b-d]e ace y $& ace
+a[b-d] aac y $& ac
+a[-b] a- y $& a-
+a[b-] a- y $& a-
+a[b-a] - c - Invalid [] range "b-a"
+a[]b - ci - Unmatched [
+a[ - c - Unmatched [
+a] a] y $& a]
+a[]]b a]b y $& a]b
+a[^bc]d aed y $& aed
+a[^bc]d abd n - -
+a[^-b]c adc y $& adc
+a[^-b]c a-c n - -
+a[^]b]c a]c n - -
+a[^]b]c adc y $& adc
+\ba\b a- y - -
+\ba\b -a y - -
+\ba\b -a- y - -
+\by\b xy n - -
+\by\b yz n - -
+\by\b xyz n - -
+\Ba\B a- n - -
+\Ba\B -a n - -
+\Ba\B -a- n - -
+\By\b xy y - -
+\By\b xy y $-[0] 1
+\By\b xy y $+[0] 2
+\By\b xy y - -
+\by\B yz y - -
+\By\B xyz y - -
+\w a y - -
+\w - n - -
+\W a n - -
+\W - y - -
+a\sb a b y - -
+a\sb a-b n - -
+a\Sb a b n - -
+a\Sb a-b y - -
+\d 1 y - -
+\d - n - -
+\D 1 n - -
+\D - y - -
+[\w] a y - -
+[\w] - n - -
+[\W] a n - -
+[\W] - y - -
+a[\s]b a b y - -
+a[\s]b a-b n - -
+a[\S]b a b n - -
+a[\S]b a-b y - -
+[\d] 1 y - -
+[\d] - n - -
+[\D] 1 n - -
+[\D] - y - -
+ab|cd abc y $& ab
+ab|cd abcd y $& ab
+()ef def y $&-$1 ef-
+()ef def y $-[0] 1
+()ef def y $+[0] 3
+()ef def y $-[1] 1
+()ef def y $+[1] 1
+*a - c - Quantifier follows nothing
+(*)b - c - Quantifier follows nothing
+$b b n - -
+a\ - c - Search pattern not terminated
+a\(b a(b y $&-$1 a(b-
+a\(*b ab y $& ab
+a\(*b a((b y $& a((b
+a\\b a\b y $& a\b
+abc) - c - Unmatched )
+(abc - c - Unmatched (
+((a)) abc y $&-$1-$2 a-a-a
+((a)) abc y $-[0]-$-[1]-$-[2] 0-0-0
+((a)) abc y $+[0]-$+[1]-$+[2] 1-1-1
+((a)) abc by @- 0 0 0
+((a)) abc by @+ 1 1 1
+(a)b(c) abc y $&-$1-$2 abc-a-c
+(a)b(c) abc y $-[0]-$-[1]-$-[2] 0-0-2
+(a)b(c) abc y $+[0]-$+[1]-$+[2] 3-1-3
+a+b+c aabbabc y $& abc
+a{1,}b{1,}c aabbabc y $& abc
+a** - c - Nested quantifiers
+a.+?c abcabc y $& abc
+(a+|b)* ab y $&-$1 ab-b
+(a+|b)* ab y $-[0] 0
+(a+|b)* ab y $+[0] 2
+(a+|b)* ab y $-[1] 1
+(a+|b)* ab y $+[1] 2
+(a+|b){0,} ab y $&-$1 ab-b
+(a+|b)+ ab y $&-$1 ab-b
+(a+|b){1,} ab y $&-$1 ab-b
+(a+|b)? ab y $&-$1 a-a
+(a+|b){0,1} ab y $&-$1 a-a
+)( - c - Unmatched )
+[^ab]* cde y $& cde
+abc n - -
+a* y $&
+([abc])*d abbbcd y $&-$1 abbbcd-c
+([abc])*bcd abcd y $&-$1 abcd-a
+a|b|c|d|e e y $& e
+(a|b|c|d|e)f ef y $&-$1 ef-e
+(a|b|c|d|e)f ef y $-[0] 0
+(a|b|c|d|e)f ef y $+[0] 2
+(a|b|c|d|e)f ef y $-[1] 0
+(a|b|c|d|e)f ef y $+[1] 1
+abcd*efg abcdefg y $& abcdefg
+ab* xabyabbbz y $& ab
+ab* xayabbbz y $& a
+(ab|cd)e abcde y $&-$1 cde-cd
+[abhgefdc]ij hij y $& hij
+^(ab|cd)e abcde n x$1y xy
+(abc|)ef abcdef y $&-$1 ef-
+(a|b)c*d abcd y $&-$1 bcd-b
+(ab|ab*)bc abc y $&-$1 abc-a
+a([bc]*)c* abc y $&-$1 abc-bc
+a([bc]*)(c*d) abcd y $&-$1-$2 abcd-bc-d
+a([bc]*)(c*d) abcd y $-[0] 0
+a([bc]*)(c*d) abcd y $+[0] 4
+a([bc]*)(c*d) abcd y $-[1] 1
+a([bc]*)(c*d) abcd y $+[1] 3
+a([bc]*)(c*d) abcd y $-[2] 3
+a([bc]*)(c*d) abcd y $+[2] 4
+a([bc]+)(c*d) abcd y $&-$1-$2 abcd-bc-d
+a([bc]*)(c+d) abcd y $&-$1-$2 abcd-b-cd
+a([bc]*)(c+d) abcd y $-[0] 0
+a([bc]*)(c+d) abcd y $+[0] 4
+a([bc]*)(c+d) abcd y $-[1] 1
+a([bc]*)(c+d) abcd y $+[1] 2
+a([bc]*)(c+d) abcd y $-[2] 2
+a([bc]*)(c+d) abcd y $+[2] 4
+a[bcd]*dcdcde adcdcde y $& adcdcde
+a[bcd]+dcdcde adcdcde n - -
+(ab|a)b*c abc y $&-$1 abc-ab
+(ab|a)b*c abc y $-[0] 0
+(ab|a)b*c abc y $+[0] 3
+(ab|a)b*c abc y $-[1] 0
+(ab|a)b*c abc y $+[1] 2
+((a)(b)c)(d) abcd y $1-$2-$3-$4 abc-a-b-d
+((a)(b)c)(d) abcd y $-[0] 0
+((a)(b)c)(d) abcd y $+[0] 4
+((a)(b)c)(d) abcd y $-[1] 0
+((a)(b)c)(d) abcd y $+[1] 3
+((a)(b)c)(d) abcd y $-[2] 0
+((a)(b)c)(d) abcd y $+[2] 1
+((a)(b)c)(d) abcd y $-[3] 1
+((a)(b)c)(d) abcd y $+[3] 2
+((a)(b)c)(d) abcd y $-[4] 3
+((a)(b)c)(d) abcd y $+[4] 4
+[a-zA-Z_][a-zA-Z0-9_]* alpha y $& alpha
+^a(bc+|b[eh])g|.h$ abh y $&-$1 bh-
+(bc+d$|ef*g.|h?i(j|k)) effgz y $&-$1-$2 effgz-effgz-
+(bc+d$|ef*g.|h?i(j|k)) ij y $&-$1-$2 ij-ij-j
+(bc+d$|ef*g.|h?i(j|k)) effg n - -
+(bc+d$|ef*g.|h?i(j|k)) bcdd n - -
+(bc+d$|ef*g.|h?i(j|k)) reffgz y $&-$1-$2 effgz-effgz-
+((((((((((a)))))))))) a y $10 a
+((((((((((a)))))))))) a y $-[0] 0
+((((((((((a)))))))))) a y $+[0] 1
+((((((((((a)))))))))) a y $-[10] 0
+((((((((((a)))))))))) a y $+[10] 1
+((((((((((a))))))))))\10 aa y $& aa
+((((((((((a))))))))))${bang} aa n - -
+((((((((((a))))))))))${bang} a! y $& a!
+(((((((((a))))))))) a y $& a
+multiple words of text uh-uh n - -
+multiple words multiple words, yeah y $& multiple words
+(.*)c(.*) abcde y $&-$1-$2 abcde-ab-de
+\((.*), (.*)\) (a, b) y ($2, $1) (b, a)
+[k] ab n - -
+abcd abcd y $&-\$&-\\$& abcd-$&-\abcd
+a(bc)d abcd y $1-\$1-\\$1 bc-$1-\bc
+a[-]?c ac y $& ac
+(abc)\1 abcabc y $1 abc
+([a-c]*)\1 abcabc y $1 abc
+\1 - c - Reference to nonexistent group
+\2 - c - Reference to nonexistent group
+(a)|\1 a y - -
+(a)|\1 x n - -
+(a)|\2 - c - Reference to nonexistent group
+(([a-c])b*?\2)* ababbbcbc y $&-$1-$2 ababb-bb-b
+(([a-c])b*?\2){3} ababbbcbc y $&-$1-$2 ababbbcbc-cbc-c
+((\3|b)\2(a)x)+ aaxabxbaxbbx n - -
+((\3|b)\2(a)x)+ aaaxabaxbaaxbbax y $&-$1-$2-$3 bbax-bbax-b-a
+((\3|b)\2(a)){2,} bbaababbabaaaaabbaaaabba y $&-$1-$2-$3 bbaaaabba-bba-b-a
+(a)|(b) b y $-[0] 0
+(a)|(b) b y $+[0] 1
+(a)|(b) b y x$-[1] x
+(a)|(b) b y x$+[1] x
+(a)|(b) b y $-[2] 0
+(a)|(b) b y $+[2] 1
+'abc'i ABC y $& ABC
+'abc'i XBC n - -
+'abc'i AXC n - -
+'abc'i ABX n - -
+'abc'i XABCY y $& ABC
+'abc'i ABABC y $& ABC
+'ab*c'i ABC y $& ABC
+'ab*bc'i ABC y $& ABC
+'ab*bc'i ABBC y $& ABBC
+'ab*?bc'i ABBBBC y $& ABBBBC
+'ab{0,}?bc'i ABBBBC y $& ABBBBC
+'ab+?bc'i ABBC y $& ABBC
+'ab+bc'i ABC n - -
+'ab+bc'i ABQ n - -
+'ab{1,}bc'i ABQ n - -
+'ab+bc'i ABBBBC y $& ABBBBC
+'ab{1,}?bc'i ABBBBC y $& ABBBBC
+'ab{1,3}?bc'i ABBBBC y $& ABBBBC
+'ab{3,4}?bc'i ABBBBC y $& ABBBBC
+'ab{4,5}?bc'i ABBBBC n - -
+'ab??bc'i ABBC y $& ABBC
+'ab??bc'i ABC y $& ABC
+'ab{0,1}?bc'i ABC y $& ABC
+'ab??bc'i ABBBBC n - -
+'ab??c'i ABC y $& ABC
+'ab{0,1}?c'i ABC y $& ABC
+'^abc$'i ABC y $& ABC
+'^abc$'i ABCC n - -
+'^abc'i ABCC y $& ABC
+'^abc$'i AABC n - -
+'abc$'i AABC y $& ABC
+'^'i ABC y $&
+'$'i ABC y $&
+'a.c'i ABC y $& ABC
+'a.c'i AXC y $& AXC
+'a.*?c'i AXYZC y $& AXYZC
+'a.*c'i AXYZD n - -
+'a[bc]d'i ABC n - -
+'a[bc]d'i ABD y $& ABD
+'a[b-d]e'i ABD n - -
+'a[b-d]e'i ACE y $& ACE
+'a[b-d]'i AAC y $& AC
+'a[-b]'i A- y $& A-
+'a[b-]'i A- y $& A-
+'a[b-a]'i - c - Invalid [] range "b-a"
+'a[]b'i - ci - Unmatched [
+'a['i - c - Unmatched [
+'a]'i A] y $& A]
+'a[]]b'i A]B y $& A]B
+'a[^bc]d'i AED y $& AED
+'a[^bc]d'i ABD n - -
+'a[^-b]c'i ADC y $& ADC
+'a[^-b]c'i A-C n - -
+'a[^]b]c'i A]C n - -
+'a[^]b]c'i ADC y $& ADC
+'ab|cd'i ABC y $& AB
+'ab|cd'i ABCD y $& AB
+'()ef'i DEF y $&-$1 EF-
+'*a'i - c - Quantifier follows nothing
+'(*)b'i - c - Quantifier follows nothing
+'$b'i B n - -
+'a\'i - c - Search pattern not terminated
+'a\(b'i A(B y $&-$1 A(B-
+'a\(*b'i AB y $& AB
+'a\(*b'i A((B y $& A((B
+'a\\b'i A\B y $& A\B
+'abc)'i - c - Unmatched )
+'(abc'i - c - Unmatched (
+'((a))'i ABC y $&-$1-$2 A-A-A
+'(a)b(c)'i ABC y $&-$1-$2 ABC-A-C
+'a+b+c'i AABBABC y $& ABC
+'a{1,}b{1,}c'i AABBABC y $& ABC
+'a**'i - c - Nested quantifiers
+'a.+?c'i ABCABC y $& ABC
+'a.*?c'i ABCABC y $& ABC
+'a.{0,5}?c'i ABCABC y $& ABC
+'(a+|b)*'i AB y $&-$1 AB-B
+'(a+|b){0,}'i AB y $&-$1 AB-B
+'(a+|b)+'i AB y $&-$1 AB-B
+'(a+|b){1,}'i AB y $&-$1 AB-B
+'(a+|b)?'i AB y $&-$1 A-A
+'(a+|b){0,1}'i AB y $&-$1 A-A
+'(a+|b){0,1}?'i AB y $&-$1 -
+')('i - c - Unmatched )
+'[^ab]*'i CDE y $& CDE
+'abc'i n - -
+'a*'i y $&
+'([abc])*d'i ABBBCD y $&-$1 ABBBCD-C
+'([abc])*bcd'i ABCD y $&-$1 ABCD-A
+'a|b|c|d|e'i E y $& E
+'(a|b|c|d|e)f'i EF y $&-$1 EF-E
+'abcd*efg'i ABCDEFG y $& ABCDEFG
+'ab*'i XABYABBBZ y $& AB
+'ab*'i XAYABBBZ y $& A
+'(ab|cd)e'i ABCDE y $&-$1 CDE-CD
+'[abhgefdc]ij'i HIJ y $& HIJ
+'^(ab|cd)e'i ABCDE n x$1y XY
+'(abc|)ef'i ABCDEF y $&-$1 EF-
+'(a|b)c*d'i ABCD y $&-$1 BCD-B
+'(ab|ab*)bc'i ABC y $&-$1 ABC-A
+'a([bc]*)c*'i ABC y $&-$1 ABC-BC
+'a([bc]*)(c*d)'i ABCD y $&-$1-$2 ABCD-BC-D
+'a([bc]+)(c*d)'i ABCD y $&-$1-$2 ABCD-BC-D
+'a([bc]*)(c+d)'i ABCD y $&-$1-$2 ABCD-B-CD
+'a[bcd]*dcdcde'i ADCDCDE y $& ADCDCDE
+'a[bcd]+dcdcde'i ADCDCDE n - -
+'(ab|a)b*c'i ABC y $&-$1 ABC-AB
+'((a)(b)c)(d)'i ABCD y $1-$2-$3-$4 ABC-A-B-D
+'[a-zA-Z_][a-zA-Z0-9_]*'i ALPHA y $& ALPHA
+'^a(bc+|b[eh])g|.h$'i ABH y $&-$1 BH-
+'(bc+d$|ef*g.|h?i(j|k))'i EFFGZ y $&-$1-$2 EFFGZ-EFFGZ-
+'(bc+d$|ef*g.|h?i(j|k))'i IJ y $&-$1-$2 IJ-IJ-J
+'(bc+d$|ef*g.|h?i(j|k))'i EFFG n - -
+'(bc+d$|ef*g.|h?i(j|k))'i BCDD n - -
+'(bc+d$|ef*g.|h?i(j|k))'i REFFGZ y $&-$1-$2 EFFGZ-EFFGZ-
+'((((((((((a))))))))))'i A y $10 A
+'((((((((((a))))))))))\10'i AA y $& AA
+'((((((((((a))))))))))${bang}'i AA n - -
+'((((((((((a))))))))))${bang}'i A! y $& A!
+'(((((((((a)))))))))'i A y $& A
+'(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))'i A y $1 A
+'(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))'i C y $1 C
+'multiple words of text'i UH-UH n - -
+'multiple words'i MULTIPLE WORDS, YEAH y $& MULTIPLE WORDS
+'(.*)c(.*)'i ABCDE y $&-$1-$2 ABCDE-AB-DE
+'\((.*), (.*)\)'i (A, B) y ($2, $1) (B, A)
+'[k]'i AB n - -
+'abcd'i ABCD y $&-\$&-\\$& ABCD-$&-\ABCD
+'a(bc)d'i ABCD y $1-\$1-\\$1 BC-$1-\BC
+'a[-]?c'i AC y $& AC
+'(abc)\1'i ABCABC y $1 ABC
+'([a-c]*)\1'i ABCABC y $1 ABC
+a(?!b). abad y $& ad
+a(?=d). abad y $& ad
+a(?=c|d). abad y $& ad
+a(?:b|c|d)(.) ace y $1 e
+a(?:b|c|d)*(.) ace y $1 e
+a(?:b|c|d)+?(.) ace y $1 e
+a(?:b|c|d)+?(.) acdbcdbe y $1 d
+a(?:b|c|d)+(.) acdbcdbe y $1 e
+a(?:b|c|d){2}(.) acdbcdbe y $1 b
+a(?:b|c|d){4,5}(.) acdbcdbe y $1 b
+a(?:b|c|d){4,5}?(.) acdbcdbe y $1 d
+((foo)|(bar))* foobar y $1-$2-$3 bar-foo-bar
+:(?: - c - Sequence (? incomplete
+a(?:b|c|d){6,7}(.) acdbcdbe y $1 e
+a(?:b|c|d){6,7}?(.) acdbcdbe y $1 e
+a(?:b|c|d){5,6}(.) acdbcdbe y $1 e
+a(?:b|c|d){5,6}?(.) acdbcdbe y $1 b
+a(?:b|c|d){5,7}(.) acdbcdbe y $1 e
+a(?:b|c|d){5,7}?(.) acdbcdbe y $1 b
+a(?:b|(c|e){1,2}?|d)+?(.) ace y $1$2 ce
+^(.+)?B AB y $1 A
+^([^a-z])|(\^)$ . y $1 .
+^[<>]& <&OUT y $& <&
+^(a\1?){4}$ aaaaaaaaaa y $1 aaaa
+^(a\1?){4}$ aaaaaaaaa n - -
+^(a\1?){4}$ aaaaaaaaaaa n - -
+^(a(?(1)\1)){4}$ aaaaaaaaaa y $1 aaaa
+^(a(?(1)\1)){4}$ aaaaaaaaa n - -
+^(a(?(1)\1)){4}$ aaaaaaaaaaa n - -
+((a{4})+) aaaaaaaaa y $1 aaaaaaaa
+(((aa){2})+) aaaaaaaaaa y $1 aaaaaaaa
+(((a{2}){2})+) aaaaaaaaaa y $1 aaaaaaaa
+(?:(f)(o)(o)|(b)(a)(r))* foobar y $1:$2:$3:$4:$5:$6 f:o:o:b:a:r
+(?<=a)b ab y $& b
+(?<=a)b cb n - -
+(?<=a)b b n - -
+(?a+)ab aaab n - -
+(?>a+)b aaab y - -
+([[:]+) a:[b]: yi $1 :[ Java and ICU dont escape [[xyz
+([[=]+) a=[b]= yi $1 =[ Java and ICU dont escape [[xyz
+([[.]+) a.[b]. yi $1 .[ Java and ICU dont escape [[xyz
+[a[:xyz: - c - Unmatched [
+[a[:xyz:] - c - POSIX class [:xyz:] unknown
+[a[:]b[:c] abc yi $& abc Java and ICU embedded [ is nested set
+([a[:xyz:]b]+) pbaq c - POSIX class [:xyz:] unknown
+[a[:]b[:c] abc iy $& abc Java and ICU embedded [ is nested set
+([[:alpha:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd
+([[:alnum:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy
+([[:ascii:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy__-- ${nulnul}
+([[:cntrl:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ${nulnul}
+([[:digit:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 01
+([[:graph:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy__--
+([[:lower:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 cd
+([[:print:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy__--
+([[:punct:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 __--
+([[:space:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1
+([[:word:]]+) ABcd01Xy__-- ${nulnul}${ffff} yi $1 ABcd01Xy__
+([[:upper:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 AB
+([[:xdigit:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01
+([[:^alpha:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 01
+([[:^alnum:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 __-- ${nulnul}${ffff}
+([[:^ascii:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ${ffff}
+([[:^cntrl:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy__--
+([[:^digit:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd
+([[:^lower:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 AB
+([[:^print:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ${nulnul}${ffff}
+([[:^punct:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy
+([[:^space:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 ABcd01Xy__--
+([[:^word:]]+) ABcd01Xy__-- ${nulnul}${ffff} yi $1 -- ${nulnul}${ffff}
+([[:^upper:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 cd01
+([[:^xdigit:]]+) ABcd01Xy__-- ${nulnul}${ffff} y $1 Xy__-- ${nulnul}${ffff}
+[[:foo:]] - c - POSIX class [:foo:] unknown
+[[:^foo:]] - c - POSIX class [:^foo:] unknown
+((?>a+)b) aaab y $1 aaab
+(?>(a+))b aaab y $1 aaa
+((?>[^()]+)|\([^()]*\))+ ((abc(ade)ufh()()x y $& abc(ade)ufh()()x
+(?<=x+)y - c - Variable length lookbehind not implemented
+a{37,17} - c - Can't do {n,m} with n > m
+\Z a\nb\n y $-[0] 3
+\z a\nb\n y $-[0] 4
+$ a\nb\n y $-[0] 3
+\Z b\na\n y $-[0] 3
+\z b\na\n y $-[0] 4
+$ b\na\n y $-[0] 3
+\Z b\na y $-[0] 3
+\z b\na y $-[0] 3
+$ b\na y $-[0] 3
+'\Z'm a\nb\n y $-[0] 3
+'\z'm a\nb\n y $-[0] 4
+'$'m a\nb\n y $-[0] 1
+'\Z'm b\na\n y $-[0] 3
+'\z'm b\na\n y $-[0] 4
+'$'m b\na\n y $-[0] 1
+'\Z'm b\na y $-[0] 3
+'\z'm b\na y $-[0] 3
+'$'m b\na y $-[0] 1
+a\Z a\nb\n n - -
+a\z a\nb\n n - -
+a$ a\nb\n n - -
+a\Z b\na\n y $-[0] 2
+a\z b\na\n n - -
+a$ b\na\n y $-[0] 2
+a\Z b\na y $-[0] 2
+a\z b\na y $-[0] 2
+a$ b\na y $-[0] 2
+'a\Z'm a\nb\n n - -
+'a\z'm a\nb\n n - -
+'a$'m a\nb\n y $-[0] 0
+'a\Z'm b\na\n y $-[0] 2
+'a\z'm b\na\n n - -
+'a$'m b\na\n y $-[0] 2
+'a\Z'm b\na y $-[0] 2
+'a\z'm b\na y $-[0] 2
+'a$'m b\na y $-[0] 2
+aa\Z aa\nb\n n - -
+aa\z aa\nb\n n - -
+aa$ aa\nb\n n - -
+aa\Z b\naa\n y $-[0] 2
+aa\z b\naa\n n - -
+aa$ b\naa\n y $-[0] 2
+aa\Z b\naa y $-[0] 2
+aa\z b\naa y $-[0] 2
+aa$ b\naa y $-[0] 2
+'aa\Z'm aa\nb\n n - -
+'aa\z'm aa\nb\n n - -
+'aa$'m aa\nb\n y $-[0] 0
+'aa\Z'm b\naa\n y $-[0] 2
+'aa\z'm b\naa\n n - -
+'aa$'m b\naa\n y $-[0] 2
+'aa\Z'm b\naa y $-[0] 2
+'aa\z'm b\naa y $-[0] 2
+'aa$'m b\naa y $-[0] 2
+aa\Z ac\nb\n n - -
+aa\z ac\nb\n n - -
+aa$ ac\nb\n n - -
+aa\Z b\nac\n n - -
+aa\z b\nac\n n - -
+aa$ b\nac\n n - -
+aa\Z b\nac n - -
+aa\z b\nac n - -
+aa$ b\nac n - -
+'aa\Z'm ac\nb\n n - -
+'aa\z'm ac\nb\n n - -
+'aa$'m ac\nb\n n - -
+'aa\Z'm b\nac\n n - -
+'aa\z'm b\nac\n n - -
+'aa$'m b\nac\n n - -
+'aa\Z'm b\nac n - -
+'aa\z'm b\nac n - -
+'aa$'m b\nac n - -
+aa\Z ca\nb\n n - -
+aa\z ca\nb\n n - -
+aa$ ca\nb\n n - -
+aa\Z b\nca\n n - -
+aa\z b\nca\n n - -
+aa$ b\nca\n n - -
+aa\Z b\nca n - -
+aa\z b\nca n - -
+aa$ b\nca n - -
+'aa\Z'm ca\nb\n n - -
+'aa\z'm ca\nb\n n - -
+'aa$'m ca\nb\n n - -
+'aa\Z'm b\nca\n n - -
+'aa\z'm b\nca\n n - -
+'aa$'m b\nca\n n - -
+'aa\Z'm b\nca n - -
+'aa\z'm b\nca n - -
+'aa$'m b\nca n - -
+ab\Z ab\nb\n n - -
+ab\z ab\nb\n n - -
+ab$ ab\nb\n n - -
+ab\Z b\nab\n y $-[0] 2
+ab\z b\nab\n n - -
+ab$ b\nab\n y $-[0] 2
+ab\Z b\nab y $-[0] 2
+ab\z b\nab y $-[0] 2
+ab$ b\nab y $-[0] 2
+'ab\Z'm ab\nb\n n - -
+'ab\z'm ab\nb\n n - -
+'ab$'m ab\nb\n y $-[0] 0
+'ab\Z'm b\nab\n y $-[0] 2
+'ab\z'm b\nab\n n - -
+'ab$'m b\nab\n y $-[0] 2
+'ab\Z'm b\nab y $-[0] 2
+'ab\z'm b\nab y $-[0] 2
+'ab$'m b\nab y $-[0] 2
+ab\Z ac\nb\n n - -
+ab\z ac\nb\n n - -
+ab$ ac\nb\n n - -
+ab\Z b\nac\n n - -
+ab\z b\nac\n n - -
+ab$ b\nac\n n - -
+ab\Z b\nac n - -
+ab\z b\nac n - -
+ab$ b\nac n - -
+'ab\Z'm ac\nb\n n - -
+'ab\z'm ac\nb\n n - -
+'ab$'m ac\nb\n n - -
+'ab\Z'm b\nac\n n - -
+'ab\z'm b\nac\n n - -
+'ab$'m b\nac\n n - -
+'ab\Z'm b\nac n - -
+'ab\z'm b\nac n - -
+'ab$'m b\nac n - -
+ab\Z ca\nb\n n - -
+ab\z ca\nb\n n - -
+ab$ ca\nb\n n - -
+ab\Z b\nca\n n - -
+ab\z b\nca\n n - -
+ab$ b\nca\n n - -
+ab\Z b\nca n - -
+ab\z b\nca n - -
+ab$ b\nca n - -
+'ab\Z'm ca\nb\n n - -
+'ab\z'm ca\nb\n n - -
+'ab$'m ca\nb\n n - -
+'ab\Z'm b\nca\n n - -
+'ab\z'm b\nca\n n - -
+'ab$'m b\nca\n n - -
+'ab\Z'm b\nca n - -
+'ab\z'm b\nca n - -
+'ab$'m b\nca n - -
+abb\Z abb\nb\n n - -
+abb\z abb\nb\n n - -
+abb$ abb\nb\n n - -
+abb\Z b\nabb\n y $-[0] 2
+abb\z b\nabb\n n - -
+abb$ b\nabb\n y $-[0] 2
+abb\Z b\nabb y $-[0] 2
+abb\z b\nabb y $-[0] 2
+abb$ b\nabb y $-[0] 2
+'abb\Z'm abb\nb\n n - -
+'abb\z'm abb\nb\n n - -
+'abb$'m abb\nb\n y $-[0] 0
+'abb\Z'm b\nabb\n y $-[0] 2
+'abb\z'm b\nabb\n n - -
+'abb$'m b\nabb\n y $-[0] 2
+'abb\Z'm b\nabb y $-[0] 2
+'abb\z'm b\nabb y $-[0] 2
+'abb$'m b\nabb y $-[0] 2
+abb\Z ac\nb\n n - -
+abb\z ac\nb\n n - -
+abb$ ac\nb\n n - -
+abb\Z b\nac\n n - -
+abb\z b\nac\n n - -
+abb$ b\nac\n n - -
+abb\Z b\nac n - -
+abb\z b\nac n - -
+abb$ b\nac n - -
+'abb\Z'm ac\nb\n n - -
+'abb\z'm ac\nb\n n - -
+'abb$'m ac\nb\n n - -
+'abb\Z'm b\nac\n n - -
+'abb\z'm b\nac\n n - -
+'abb$'m b\nac\n n - -
+'abb\Z'm b\nac n - -
+'abb\z'm b\nac n - -
+'abb$'m b\nac n - -
+abb\Z ca\nb\n n - -
+abb\z ca\nb\n n - -
+abb$ ca\nb\n n - -
+abb\Z b\nca\n n - -
+abb\z b\nca\n n - -
+abb$ b\nca\n n - -
+abb\Z b\nca n - -
+abb\z b\nca n - -
+abb$ b\nca n - -
+'abb\Z'm ca\nb\n n - -
+'abb\z'm ca\nb\n n - -
+'abb$'m ca\nb\n n - -
+'abb\Z'm b\nca\n n - -
+'abb\z'm b\nca\n n - -
+'abb$'m b\nca\n n - -
+'abb\Z'm b\nca n - -
+'abb\z'm b\nca n - -
+'abb$'m b\nca n - -
+(^|x)(c) ca y $2 c
+a*abc?xyz+pqr{3}ab{2,}xy{4,5}pq{0,6}AB{0,}zz x n - -
+a(?{$a=2;$b=3;($b)=$a})b yabz y $b 2
+round\(((?>[^()]+))\) _I(round(xs * sz),1) y $1 xs * sz
+'((?x:.) )' x y $1- x -
+'((?-x:.) )'x x y $1- x-
+foo.bart foo.bart y - -
+'^d[x][x][x]'m abcd\ndxxx y - -
+.X(.+)+X bbbbXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - - # TODO: ICU doesn't optimize on trailing literals in pattern.
+.X(.+)+XX bbbbXcXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - -
+.XX(.+)+X bbbbXXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - -
+.X(.+)+X bbbbXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - -
+.X(.+)+XX bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - -
+.XX(.+)+X bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - -
+.X(.+)+[X] bbbbXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - -
+.X(.+)+[X][X] bbbbXcXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - -
+.XX(.+)+[X] bbbbXXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - -
+.X(.+)+[X] bbbbXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - -
+.X(.+)+[X][X] bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - -
+.XX(.+)+[X] bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - -
+.[X](.+)+[X] bbbbXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - -
+.[X](.+)+[X][X] bbbbXcXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - -
+.[X][X](.+)+[X] bbbbXXcXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa yi - -
+.[X](.+)+[X] bbbbXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - -
+.[X](.+)+[X][X] bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - -
+.[X][X](.+)+[X] bbbbXXXaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa ni - -
+tt+$ xxxtt y - -
+([a-\d]+) za-9z yi $1 a-9
+([\d-z]+) a0-za y $1 0-z
+([\d-\s]+) a0- z y $1 0-
+([a-[:digit:]]+) za-9z y $1 a-9
+([[:digit:]-z]+) =0-z= y $1 0-z
+([[:digit:]-[:alpha:]]+) =0-z= iy $1 0-z Set difference in ICU
+\GX.*X aaaXbX n - -
+(\d+\.\d+) 3.1415926 y $1 3.1415926
+(\ba.{0,10}br) have a web browser y $1 a web br
+'\.c(pp|xx|c)?$'i Changes n - -
+'\.c(pp|xx|c)?$'i IO.c y - -
+'(\.c(pp|xx|c)?$)'i IO.c y $1 .c
+^([a-z]:) C:/ n - -
+'^\S\s+aa$'m \nx aa y - -
+(^|a)b ab y - -
+^([ab]*?)(b)?(c)$ abac y -$2- --
+(\w)?(abc)\1b abcab n - -
+^(?:.,){2}c a,b,c y - -
+^(.,){2}c a,b,c y $1 b,
+^(?:[^,]*,){2}c a,b,c y - -
+^([^,]*,){2}c a,b,c y $1 b,
+^([^,]*,){3}d aaa,b,c,d y $1 c,
+^([^,]*,){3,}d aaa,b,c,d y $1 c,
+^([^,]*,){0,3}d aaa,b,c,d y $1 c,
+^([^,]{1,3},){3}d aaa,b,c,d y $1 c,
+^([^,]{1,3},){3,}d aaa,b,c,d y $1 c,
+^([^,]{1,3},){0,3}d aaa,b,c,d y $1 c,
+^([^,]{1,},){3}d aaa,b,c,d y $1 c,
+^([^,]{1,},){3,}d aaa,b,c,d y $1 c,
+^([^,]{1,},){0,3}d aaa,b,c,d y $1 c,
+^([^,]{0,3},){3}d aaa,b,c,d y $1 c,
+^([^,]{0,3},){3,}d aaa,b,c,d y $1 c,
+^([^,]{0,3},){0,3}d aaa,b,c,d y $1 c,
+(?i) y - -
+'(?!\A)x'm a\nxb\n y - -
+^(a(b)?)+$ aba yi -$1-$2- -a-- Java disagrees. Not clear who is right.
+'^.{9}abc.*\n'm 123\nabcabcabcabc\n y - -
+^(a)?a$ a y -$1- --
+^(a)?(?(1)a|b)+$ a n - -
+^(a\1?)(a\1?)(a\2?)(a\3?)$ aaaaaa y $1,$2,$3,$4 a,aa,a,aa
+^(a\1?){4}$ aaaaaa y $1 aa
+^(0+)?(?:x(1))? x1 y - -
+^([0-9a-fA-F]+)(?:x([0-9a-fA-F]+)?)(?:x([0-9a-fA-F]+))? 012cxx0190 y - -
+^(b+?|a){1,2}c bbbac y $1 a
+^(b+?|a){1,2}c bbbbac y $1 a
+\((\w\. \w+)\) cd. (A. Tw) y -$1- -A. Tw-
+((?:aaaa|bbbb)cccc)? aaaacccc y - -
+((?:aaaa|bbbb)cccc)? bbbbcccc y - -
+(a)?(a)+ a y $1:$2 :a -
+(ab)?(ab)+ ab y $1:$2 :ab -
+(abc)?(abc)+ abc y $1:$2 :abc -
+'b\s^'m a\nb\n n - -
+\ba a y - -
+^(a(??{"(?!)"})|(a)(?{1}))b ab yi $2 a # [ID 20010811.006]
+ab(?i)cd AbCd n - - # [ID 20010809.023]
+ab(?i)cd abCd y - -
+(A|B)*(?(1)(CD)|(CD)) CD y $2-$3 -CD
+(A|B)*(?(1)(CD)|(CD)) ABCD y $2-$3 CD-
+(A|B)*?(?(1)(CD)|(CD)) CD y $2-$3 -CD # [ID 20010803.016]
+(A|B)*?(?(1)(CD)|(CD)) ABCD y $2-$3 CD-
+'^(o)(?!.*\1)'i Oo n - -
+(.*)\d+\1 abc12bc y $1 bc
+(?m:(foo\s*$)) foo\n bar y $1 foo
+(.*)c abcd y $1 ab
+(.*)(?=c) abcd y $1 ab
+(.*)(?=c)c abcd yB $1 ab
+(.*)(?=b|c) abcd y $1 ab
+(.*)(?=b|c)c abcd y $1 ab
+(.*)(?=c|b) abcd y $1 ab
+(.*)(?=c|b)c abcd y $1 ab
+(.*)(?=[bc]) abcd y $1 ab
+(.*)(?=[bc])c abcd yB $1 ab
+(.*)(?<=b) abcd y $1 ab
+(.*)(?<=b)c abcd y $1 ab
+(.*)(?<=b|c) abcd y $1 abc
+(.*)(?<=b|c)c abcd y $1 ab
+(.*)(?<=c|b) abcd y $1 abc
+(.*)(?<=c|b)c abcd y $1 ab
+(.*)(?<=[bc]) abcd y $1 abc
+(.*)(?<=[bc])c abcd y $1 ab
+(.*?)c abcd y $1 ab
+(.*?)(?=c) abcd y $1 ab
+(.*?)(?=c)c abcd yB $1 ab
+(.*?)(?=b|c) abcd y $1 a
+(.*?)(?=b|c)c abcd y $1 ab
+(.*?)(?=c|b) abcd y $1 a
+(.*?)(?=c|b)c abcd y $1 ab
+(.*?)(?=[bc]) abcd y $1 a
+(.*?)(?=[bc])c abcd yB $1 ab
+(.*?)(?<=b) abcd y $1 ab
+(.*?)(?<=b)c abcd y $1 ab
+(.*?)(?<=b|c) abcd y $1 ab
+(.*?)(?<=b|c)c abcd y $1 ab
+(.*?)(?<=c|b) abcd y $1 ab
+(.*?)(?<=c|b)c abcd y $1 ab
+(.*?)(?<=[bc]) abcd y $1 ab
+(.*?)(?<=[bc])c abcd y $1 ab
+2(]*)?$\1 2 y $& 2
+(??{}) x yi - -
diff --git a/go/mysql/icuregex/testdata/regextst.txt b/go/mysql/icuregex/testdata/regextst.txt
new file mode 100644
index 00000000000..8d5d2c34a8e
--- /dev/null
+++ b/go/mysql/icuregex/testdata/regextst.txt
@@ -0,0 +1,2793 @@
+# Copyright (C) 2016 and later: Unicode, Inc. and others.
+# License & terms of use: http://www.unicode.org/copyright.html
+# Copyright (c) 2001-2015 International Business Machines
+# Corporation and others. All Rights Reserved.
+#
+# file:
+#
+# ICU regular expression test cases.
+#
+# format: one test case per line,
+# = [# comment]
+# = ""
+# = ""
+# the quotes on the pattern and match string can be " or ' or /
+# = text, with the start and end of each
+# capture group tagged with .... The overall match,
+# if any, is group 0, as in <0>matched text0>
+# A region can be specified with ... tags.
+# Standard ICU unescape will be applied, allowing \u, \U, etc. to appear.
+#
+# = any combination of
+# i case insensitive match
+# x free spacing and comments
+# s dot-matches-all mode
+# m multi-line mode.
+# ($ and ^ match at embedded new-lines)
+# D Unix Lines mode (only recognize 0x0a as new-line)
+# Q UREGEX_LITERAL flag. Entire pattern is literal string.
+# v If icu configured without break iteration, this
+# regex test pattern should not compile.
+# e set the UREGEX_ERROR_ON_UNKNOWN_ESCAPES flag
+# d dump the compiled pattern
+# t trace operation of match engine.
+# 2-9 a digit between 2 and 9, specifies the number of
+# times to execute find(). The expected results are
+# for the last find() in the sequence.
+# G Only check match / no match. Do not check capture groups.
+# E Pattern compilation error expected
+# L Use LookingAt() rather than find()
+# M Use matches() rather than find().
+#
+# a Use non-Anchoring Bounds.
+# b Use Transparent Bounds.
+# The a and b options only make a difference if
+# a region has been specified in the string.
+# z|Z hitEnd was expected(z) or not expected (Z).
+# With neither, hitEnd is not checked.
+# y|Y Require End expected(y) or not expected (Y).
+#
+# White space must be present between the flags and the match string.
+#
+
+# Look-ahead expressions
+#
+"(?!0{5})(\d{5})" "<0><1>000011>0>zzzz"
+"(?!0{5})(\d{5})z" "<0><1>000011>z0>zzz"
+"(?!0{5})(\d{5})(?!y)" "<0><1>000011>0>zzzz"
+"abc(?=def)" "<0>abc0>def"
+"(.*)(?=c)" "<0><1>ab1>0>cdef"
+
+"(?:.*)(?=c)" "abcdef"
+"(?:.*)(?=c)" b "<0>ab0>cdef" # transparent bounds
+"(?:.*)(?=c)" bM "<0>ab0>cdef" # transparent bounds
+
+"(?:.*)(?=(c))" b "<0>ab0><1>c1>def" # Capture in look-ahead
+"(?=(.)\1\1)\1" "abcc<0><1>d1>0>ddefg" # Backrefs to look-ahead capture
+
+".(?!\p{L})" "abc<0>d0> " # Negated look-ahead
+".(?!(\p{L}))" "abc<0>d0> " # Negated look-ahead, no capture
+ # visible outside of look-ahead
+"and(?=roid)" L "<0>and0>roid"
+"and(?=roid)" M "android"
+"and(?=roid)" bM "<0>and0>roid"
+
+"and(?!roid)" L "<0>and0>roix"
+"and(?!roid)" L "android"
+
+"and(?!roid)" M "<0>and0>roid" # Opaque bounds
+"and(?!roid)" bM "android"
+"and(?!roid)" bM "<0>and0>roix"
+
+#
+# Negated Lookahead, various regions and region transparency
+#
+"abc(?!def)" "<0>abc0>xyz"
+"abc(?!def)" "abcdef"
+"abc(?!def)" "<0>abc0>def"
+"abc(?!def)" b "abcdef"
+"abc(?!def)" b "<0>abc0>xyz"
+
+#
+# Nested Lookahead / Behind
+#
+"one(?=(?:(?!).)*)" "<0>one0> stuff"
+"one(?=(?:(?!).)*)" "one "
+
+# More nesting lookaround: pattern matches "qq" when not preceded by 'a' and followed by 'z'
+"(?qq0>c"
+"(?qq0>c"
+"(?A1><0>jk0><2>B2>"
+"(?=(?<=(\p{Lu})(?=..(\p{Lu})))).." "ajkB"
+"(?=(?<=(\p{Lu})(?=..(\p{Lu})))).." "Ajkb"
+
+# Nested lookaround cases from bug ICU-20564
+"(?<=(?<=((?=)){0}+))" "<0>0>abc"
+"(?<=c(?<=c((?=c)){1}+))" "c<0><1>1>0>cc"
+
+#
+# Anchoring Bounds
+#
+"^def$" "abc<0>def0>ghi" # anchoring (default) bounds
+"^def$" a "abcdefghi" # non-anchoring bounds
+"^def" a "<0>def0>ghi" # non-anchoring bounds
+"def$" a "abc<0>def0>" # non-anchoring bounds
+
+"^.*$" m "<0>line 10>\n line 2"
+"^.*$" m2 "line 1\n<0> line 20>"
+"^.*$" m3 "line 1\n line 2"
+"^.*$" m "li<0>ne 0>1\n line 2" # anchoring bounds
+"^.*$" m2 "line 1\n line 2" # anchoring bounds
+"^.*$" am "line 1\n line 2" # non-anchoring bounds
+"^.*$" am "li\n<0>ne 0>\n1\n line 2" # non-anchoring bounds
+
+#
+# HitEnd and RequireEnd for new-lines just before end-of-input
+#
+"xyz$" yz "<0>xyz0>\n"
+"xyz$" yz "<0>xyz0>\x{d}\x{a}"
+
+"xyz$" myz "<0>xyz0>" # multi-line mode
+"xyz$" mYZ "<0>xyz0>\n"
+"xyz$" mYZ "<0>xyz0>\r\n"
+"xyz$" mYZ "<0>xyz0>\x{85}abcd"
+
+"xyz$" Yz "xyz\nx"
+"xyz$" Yz "xyza"
+"xyz$" yz "<0>xyz0>"
+
+#
+# HitEnd
+#
+"abcd" Lz "a"
+"abcd" Lz "ab"
+"abcd" Lz "abc"
+"abcd" LZ "<0>abcd0>"
+"abcd" LZ "<0>abcd0>e"
+"abcd" LZ "abcx"
+"abcd" LZ "abx"
+"abcd" Lzi "a"
+"abcd" Lzi "ab"
+"abcd" Lzi "abc"
+"abcd" LZi "<0>abcd0>"
+"abcd" LZi "<0>abcd0>e"
+"abcd" LZi "abcx"
+"abcd" LZi "abx"
+
+#
+# All Unicode line endings recognized.
+# 0a, 0b, 0c, 0d, 0x85, 0x2028, 0x2029
+# Multi-line and non-multiline mode take different paths, so repeated tests.
+#
+"^def$" mYZ "abc\x{a}<0>def0>\x{a}ghi"
+"^def$" mYZ "abc\x{b}<0>def0>\x{b}ghi"
+"^def$" mYZ "abc\x{c}<0>def0>\x{c}ghi"
+"^def$" mYZ "abc\x{d}<0>def0>\x{d}ghi"
+"^def$" mYZ "abc\x{85}<0>def0>\x{85}ghi"
+"^def$" mYZ "abc\x{2028}<0>def0>\x{2028}ghi"
+"^def$" mYZ "abc\x{2029}<0>def0>\x{2029}ghi"
+"^def$" mYZ "abc\r\n<0>def0>\r\nghi"
+
+"^def$" yz "<0>def0>\x{a}"
+"^def$" yz "<0>def0>\x{b}"
+"^def$" yz "<0>def0>\x{c}"
+"^def$" yz "<0>def0>\x{d}"
+"^def$" yz "<0>def0>\x{85}"
+"^def$" yz "<0>def0>\x{2028}"
+"^def$" yz "<0>def0>\x{2029}"
+"^def$" yz "<0>def0>\r\n"
+"^def$" yz "<0>def0>"
+
+
+# "^def$" "<0>def0>\x{2028" #TODO: should be an error of some sort.
+
+#
+# UNIX_LINES mode
+#
+"abc$" D "<0>abc0>\n"
+"abc$" D "abc\r"
+"abc$" D "abc\u0085"
+"a.b" D "<0>a\rb0>"
+"a.b" D "a\nb"
+"(?d)abc$" "<0>abc0>\n"
+"(?d)abc$" "abc\r"
+"abc$" mD "<0>abc0>\ndef"
+"abc$" mD "abc\rdef"
+
+".*def" L "abc\r def xyz" # Normal mode, LookingAt() stops at \r
+".*def" DL "<0>abc\r def0> xyz" # Unix Lines mode, \r not line end.
+".*def" DL "abc\n def xyz"
+
+"(?d)a.b" "a\nb"
+"(?d)a.b" "<0>a\rb0>"
+
+"^abc" m "xyz\r<0>abc0>"
+"^abc" Dm "xyz\rabc"
+"^abc" Dm "xyz\n<0>abc0>"
+
+
+
+# Capturing parens
+".(..)." "<0>a<1>bc1>d0>"
+ ".*\A( +hello)" "<0><1> hello1>0>"
+"(hello)|(goodbye)" "<0><1>hello1>0>"
+"(hello)|(goodbye)" "<0><2>goodbye2>0>"
+"abc( +( inner(X?) +) xyz)" "leading cruft <0>abc<1> <2> inner<3>3> 2> xyz1>0> cruft"
+"\s*([ixsmdt]*)([:letter:]*)" "<0> <1>d1><2>2>0> "
+"(a|b)c*d" "a<0><1>b1>cd0>"
+
+# Non-capturing parens (?: stuff). Groups, but does not capture.
+"(?:abc)*(tail)" "<0>abcabcabc<1>tail1>0>"
+
+# Non-greedy *? quantifier
+".*?(abc)" "<0> abx <1>abc1>0> abc abc abc"
+".*(abc)" "<0> abx abc abc abc <1>abc1>0>"
+
+"((?:abc |xyz )*?)abc " "<0><1>xyz 1>abc 0>abc abc "
+"((?:abc |xyz )*)abc " "<0><1>xyz abc abc 1>abc 0>"
+
+# Non-greedy +? quantifier
+"(a+?)(a*)" "<0><1>a1><2>aaaaaaaaaaaa2>0>"
+"(a+)(a*)" "<0><1>aaaaaaaaaaaaa1><2>2>0>"
+
+"((ab)+?)((ab)*)" "<0><1><2>ab2>1><3>ababababab<4>ab4>3>0>"
+"((ab)+)((ab)*)" "<0><1>abababababab<2>ab2>1><3>3>0>"
+
+# Non-greedy ?? quantifier
+"(ab)(ab)??(ab)??(ab)??(ab)??c" "<0><1>ab1><4>ab4><5>ab5>c0>"
+
+# Unicode Properties as naked elements in a pattern
+"\p{Lu}+" "here we go ... <0>ABC0> and no more."
+"(\p{L}+)(\P{L}*?) (\p{Zs}*)" "7999<0><1>letters1><2>4949%^&*(2> <3> 3>0>"
+
+# \w and \W
+"\w+" " $%^&*( <0>hello1230>%^&*("
+"\W+" "<0> $%^&*( 0>hello123%^&*("
+
+# \A match at beginning of input only.
+ ".*\Ahello" "<0>hello0> hello"
+ ".*hello" "<0>hello hello0>"
+".*\Ahello" "stuff\nhello" # don't match after embedded new-line.
+
+# \b \B
+#
+".*?\b(.).*" "<0> $%^&*( <1>h1>ello123%^&*()gxx0>"
+"\ba\b" "-<0>a0>"
+"\by\b" "xy"
+"[ \b]" "<0>b0>" # in a set, \b is a literal b.
+
+# Finds first chars of up to 5 words
+"(?:.*?\b(\w))?(?:.*?\b(\w))?(?:.*?\b(\w))?(?:.*?\b(\w))?(?:.*?\b(\w))?" "<0><1>T1>the <2>q2>ick <3>b3>rown <4>f4>0>ox"
+
+"H.*?((?:\B.)+)" "<0>H<1>ello1>0> "
+".*?((?:\B.)+).*?((?:\B.)+).*?((?:\B.)+)" "<0>H<1>ello1> <2> 2>g<3>oodbye3>0> "
+
+"(?:.*?\b(.))?(?:.*?\b(.))?(?:.*?\b(.))?(?:.*?\b(.))?(?:.*?\b(.))?.*" "<0> \u0301 \u0301<1>A1>\u0302BC\u0303\u0304<2> 2>\u0305 \u0306<3>X3>\u0307Y\u03080>"
+
+
+#
+# Unicode word boundary mode
+#
+"(?w).*?\b" v "<0>0>hello, world"
+"(?w).*?(\b.+?\b).*" v "<0><1> 1>123.45 0>"
+"(?w).*?(\b\d.*?\b).*" v "<0> <1>123.451> 0>"
+".*?(\b.+?\b).*" "<0> <1>1231>.45 0>"
+"(?w:.*?(\b\d.*?\b).*)" v "<0> <1>123.451> 0>"
+"(?w:.*?(\b.+?\b).*)" v "<0><1>don't1> 0>"
+"(?w:.+?(\b\S.+?\b).*)" v "<0> <1>don't1> 0>"
+"(?w:(\b.+?)(\b.+?)(\b.+?)(\b.+?)(\b.+?)(\b.+?)(\b.+?).*)" v "<0><1>.1><2> 2><3>,3><4>:4><5>$5><6>37,000.506><7> 7> 0>"
+
+#
+# Unicode word boundaries with Regions
+#
+"(?w).*?\b" v "abc<0>def0>ghi"
+"(?w).*?\b" v2 "abcdef<0>0>ghi"
+"(?w).*?\b" v3 "abcdefghi"
+#"(?w).*?\b" vb "abc<0>def0>ghi" # TODO: bug. Ticket 6073
+#"(?w).*?\b" vb2 "abcdefghi"
+
+
+
+# . does not match new-lines
+"." "\u000a\u000d\u0085\u000c\u000b\u2028\u2029<0>X0>\u000aY"
+"A." "A\u000a "# no match
+
+# \d for decimal digits
+"\d*" "<0>0123456789\u0660\u06F9\u0969\u0A66\u17E2\uFF10\U0001D7CE\U0001D7FF0>non-digits"
+"\D+" "<0>non digits0>"
+"\D*(\d*)(\D*)" "<0>non-digits<1>34566661><2>more non digits2>0>"
+
+# \Q...\E quote mode
+"hel\Qlo, worl\Ed" "<0>hello, world0>"
+"\Q$*^^(*)?\A\E(a*)" "<0>$*^^(*)?\\A<1>aaaaaaaaaaaaaaa1>0>"
+"[abc\Q]\r\E]+" "<0>aaaccc]]]\\\\\\0>\r..." # \Q ... \E escape in a [set]
+
+# UREGEX_LITERAL - entire pattern is a literal string, no escapes recognized.
+# Note that data strings in test cases still get escape processing.
+"abc\an\r\E\\abcd\u0031bye" Q "lead<0>abc\\an\\r\\E\\\\abcd\\u0031bye0>extra"
+"case insensitive \\ (l)iteral" Qi "stuff!! <0>cAsE InSenSiTiVE \\\\ (L)ITeral0>"
+
+# \S and \s space characters
+"\s+" "not_space<0> \t \r \n \u3000 \u2004 \u2028 \u20290>xyz"
+"(\S+).*?(\S+).*" "<0><1>Not-spaces1> <2>more-non-spaces2> 0>"
+
+# \X consume one Grapheme Cluster.
+"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>A1><2>B2><3> 3><4>\r\n4>0>"
+"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>A\u03011><2>\n2><3>\u03053><4>a\u0302\u0303\u03044>0>"
+"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>\u1100\u1161\u11a81><2>\u115f\u11a2\u11f92>0>"
+"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>\u1100\uac011><2>\uac022><3>\uac03\u11b03>0>"
+"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>\u1100\u1101\uac02\u03011><2>\u11002>0>"
+# Regional indicator pairs are grapheme clusters
+"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>\U0001f1e6\U0001f1e81><2>\U0001f1ea\U0001f1ff2>0>"
+# Grapheme Break rule 9b: Prepend x
+"(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?(\X)?" v "<0><1>\U000111C2x1>0>"
+
+# Grapheme clusters that straddle a match region. Matching is pinned to the region limits,
+# giving boundaries inside grapheme clusters
+"(\X)?(\X)?(\X)?" v "a\u0301<0><1>\u0301\u03011><2>z\u03022>0>\u0302\u0302"
+# Same as previous test case, but without the region limits.
+"(\X)?(\X)?(\X)?" v "<0><1>a\u0301\u0301\u03011><2>z\u0302\u0302\u03022>0>"
+
+# ^ matches only at beginning of line
+".*^(Hello)" "<0><1>Hello1>0> Hello Hello Hello Goodbye"
+".*(Hello)" "<0>Hello Hello Hello <1>Hello1>0> Goodbye"
+".*^(Hello)" " Hello Hello Hello Hello Goodbye"# No Match
+
+# $ matches only at end of line, or before a newline preceding the end of line
+".*?(Goodbye)$" zy "<0>Hello Goodbye Goodbye <1>Goodbye1>0>"
+".*?(Goodbye)" ZY "<0>Hello <1>Goodbye1>0> Goodbye Goodbye"
+".*?(Goodbye)$" z "Hello Goodbye> Goodbye Goodbye "# No Match
+
+".*?(Goodbye)$" zy "<0>Hello Goodbye Goodbye <1>Goodbye1>0>\n"
+".*?(Goodbye)$" zy "<0>Hello Goodbye Goodbye <1>Goodbye1>0>\n"
+".*?(Goodbye)$" zy "<0>Hello Goodbye Goodbye <1>Goodbye1>0>\r\n"
+".*?(Goodbye)$" z "Hello Goodbye Goodbye Goodbye\n\n"# No Match
+
+# \Z matches at end of input, like $ with default flags.
+".*?(Goodbye)\Z" zy "<0>Hello Goodbye Goodbye <1>Goodbye1>0>"
+".*?(Goodbye)" ZY "<0>Hello <1>Goodbye1>0> Goodbye Goodbye"
+".*?(Goodbye)\Z" z "Hello Goodbye> Goodbye Goodbye "# No Match
+"here$" z "here\nthe end"# No Match
+
+".*?(Goodbye)\Z" "<0>Hello Goodbye Goodbye <1>Goodbye1>0>\n"
+".*?(Goodbye)\Z" "<0>Hello Goodbye Goodbye <1>Goodbye1>0>\n"
+".*?(Goodbye)\Z" "<0>Hello Goodbye Goodbye <1>Goodbye1>0>\r\n"
+".*?(Goodbye)\Z" "Hello Goodbye Goodbye Goodbye\n\n"# No Match
+
+# \z matches only at the end of string.
+# no special treatment of new lines.
+# no dependencies on flag settings.
+".*?(Goodbye)\z" zy "<0>Hello Goodbye Goodbye <1>Goodbye1>0>"
+".*?(Goodbye)\z" z "Hello Goodbye Goodbye Goodbye "# No Match
+"here$" z "here\nthe end"# No Match
+
+".*?(Goodbye)\z" z "Hello Goodbye Goodbye Goodbye\n"# No Match
+".*?(Goodbye)\n\z" zy "<0>Hello Goodbye Goodbye <1>Goodbye1>\n0>"
+"abc\z|def" ZY "abc<0>def0>"
+
+# (?# comment) doesn't muck up pattern
+"Hello (?# this is a comment) world" " <0>Hello world0>..."
+
+# Check some implementation corner cases base on the way literal strings are compiled.
+"A" "<0>A0>"
+"AB" "<0>AB0>ABABAB"
+"AB+" "<0>ABBB0>A"
+"AB+" "<0>AB0>ABAB"
+"ABC+" "<0>ABC0>ABC"
+"ABC+" "<0>ABCCCC0>ABC"
+"(?:ABC)+" "<0>ABCABCABC0>D"
+"(?:ABC)DEF+" "<0>ABCDEFFF0>D"
+"AB\.C\eD\u0666E" "<0>AB.C\u001BD\u0666E0>F"
+"ab\Bde" "<0>abde0>"
+
+# loop breaking
+"(a?)*" "<0><1>1>0>xyz"
+"(a?)+" "<0><1>1>0>xyz"
+"^(?:a?b?)*$" "a--"
+"(x?)*xyz" "<0>xx<1>1>xyz0>" # Sligthtly weird, but correct. The "last" time through (x?),
+ # it matches the empty string.
+
+# Set expressions, basic operators and escapes work
+#
+"[\d]+" "<0>01230>abc/.,"
+"[^\d]+" "0123<0>abc/.,0>"
+"[\D]+" "0123<0>abc/.,0>"
+"[^\D]+" "<0>01230>abc/.,"
+
+"[\s]+" "<0> \t0>abc/.,"
+"[^\s]+" " \t<0>abc/.,0>"
+"[\S]+" " \t<0>abc/.,0>"
+"[^\S]+" "<0> \t0>abc/.,"
+
+"[\w]+" "<0>abc1230> .,;"
+"[^\w]+" "abc123<0> .,;0>"
+"[\W]+" "abc123<0> .,;0>"
+"[^\W]+" "<0>abc1230> .,;"
+
+"[\z]+" "abc<0>zzz0>def" # \z has no special meaning
+"[^\z]+" "<0>abc0>zzzdef"
+"[\^]+" "abc<0>^^0>"
+"[^\^]+" "<0>abc0>^^"
+
+"[\u0041c]+" "<0>AcAc0>def"
+"[\U00010002]+" "<0>\ud800\udc020>\U00010003"
+"[^\U00010002]+" "<0>Hello0>\x{10002}"
+"[\x61b]+" "<0>abab0>cde"
+#"[\x6z]+" "\x06" #TODO: single hex digits should fail
+"[\x{9}\x{75}\x{6d6}\x{6ba6}\x{6146B}\x{10ffe3}]+" "<0>\u0009\u0075\u06d6\u6ba6\U0006146B\U0010ffe30>abc"
+
+"[\N{LATIN CAPITAL LETTER TONE SIX}ab\N{VARIATION SELECTOR-70} ]+" "x<0> \u0184\U000E0135 ab0>c"
+"[\N{LATIN SMALL LETTER C}-\N{LATIN SMALL LETTER F}]+" "ab<0>cdef0>ghi"
+
+
+
+#
+# [set expressions], check the precedence of '-', '&', '--', '&&'
+# '-' and '&', for compatibility with ICU UnicodeSet, have the same
+# precedence as the implicit Union between adjacent items.
+# '--' and '&&', for compatibility with Java, have lower precedence than
+# the implicit Union operations. '--' and '&&' themselves
+# have the same precedence, and group left to right.
+#
+"[[a-m]-[f-w]p]+" "<0>dep0>fgwxyz"
+"[^[a-m]-[f-w]p]+" "dep<0>fgwxyz0>"
+
+"[[a-m]--[f-w]p]+" "<0>de0>pfgwxyz"
+"[^[a-m]--[f-w]p]+" "de<0>pfgwxyz0>"
+
+"[[a-m]&[e-s]w]+" "<0>efmw0>adnst"
+"[^[a-m]&[e-s]w]+" "efmw<0>adnst0>"
+
+"[[a-m]&[e-s]]+" "<0>efm0>adnst"
+
+
+
+# {min,max} iteration qualifier
+"A{3}BC" "<0>AAABC0>"
+
+"(ABC){2,3}AB" "no matchAB"
+"(ABC){2,3}AB" "ABCAB"
+"(ABC){2,3}AB" "<0>ABC<1>ABC1>AB0>"
+"(ABC){2,3}AB" "<0>ABCABC<1>ABC1>AB0>"
+"(ABC){2,3}AB" "<0>ABCABC<1>ABC1>AB0>CAB"
+
+"(ABC){2}AB" "ABCAB"
+"(ABC){2}AB" "<0>ABC<1>ABC1>AB0>"
+"(ABC){2}AB" "<0>ABC<1>ABC1>AB0>CAB"
+"(ABC){2}AB" "<0>ABC<1>ABC1>AB0>CABCAB"
+
+"(ABC){2,}AB" "ABCAB"
+"(ABC){2,}AB" "<0>ABC<1>ABC1>AB0>"
+"(ABC){2,}AB" "<0>ABCABC<1>ABC1>AB0>"
+"(ABC){2,}AB" "<0>ABCABCABC<1>ABC1>AB0>"
+
+"X{0,0}ABC" "<0>ABC0>"
+"X{0,1}ABC" "<0>ABC0>"
+
+"(?:Hello(!{1,3}) there){1}" "Hello there"
+"(?:Hello(!{1,3}) there){1}" "<0>Hello<1>!1> there0>"
+"(?:Hello(!{1,3}) there){1}" "<0>Hello<1>!!1> there0>"
+"(?:Hello(!{1,3}) there){1}" "<0>Hello<1>!!!1> there0>"
+"(?:Hello(!{1,3}) there){1}" "Hello!!!! there"
+
+# Nongreedy {min,max}? intervals
+"(ABC){2,3}?AB" "no matchAB"
+"(ABC){2,3}?AB" "ABCAB"
+"(ABC){2,3}?AB" "<0>ABC<1>ABC1>AB0>"
+"(ABC){2,3}?AB" "<0>ABC<1>ABC1>AB0>CAB"
+"(ABC){2,3}?AB" "<0>ABC<1>ABC1>AB0>CABCAB"
+"(ABC){2,3}?AX" "<0>ABCABC<1>ABC1>AX0>"
+"(ABC){2,3}?AX" "ABC<0>ABCABC<1>ABC1>AX0>"
+
+# Possessive {min,max}+ intervals
+"(ABC){2,3}+ABC" "ABCABCABC"
+"(ABC){1,2}+ABC" "<0>ABC<1>ABC1>ABC0>"
+"(?:(.)\1){2,5}+." "<0>aabbcc<1>d1>de0>x"
+
+
+# Atomic Grouping
+"(?>.*)abc" "abcabcabc" # no match. .* consumed entire string.
+"(?>(abc{2,4}?))(c*)" "<0><1>abcc1><2>ccc2>0>ddd"
+"(\.\d\d(?>[1-9]?))\d+" "1.625"
+"(\.\d\d(?>[1-9]?))\d+" "1<0><1>.6251>00>"
+
+# Possessive *+
+"(abc)*+a" "abcabcabc"
+"(abc)*+a" "<0>abc<1>abc1>a0>b"
+"(a*b)*+a" "<0><1>aaaab1>a0>aaa"
+
+# Possessive ?+
+"c?+ddd" "<0>cddd0>"
+"c?+cddd" "cddd"
+"c?cddd" "<0>cddd0>"
+
+# Back Reference
+"(?:ab(..)cd\1)*" "<0>ab23cd23ab<1>ww1>cdww0>abxxcdyy"
+"ab(?:c|(d?))(\1)" "<0>ab<1><2>2>1>0>c"
+"ab(?:c|(d?))(\1)" "<0>ab<1>d1><2>d2>0>"
+"ab(?:c|(d?))(\1)" "<0>ab<1>1><2>2>0>e"
+"ab(?:c|(d?))(\1)" "<0>ab<1>1><2>2>0>"
+
+# Back References that hit/don't hit end
+"(abcd) \1" z "abcd abc"
+"(abcd) \1" Z "<0><1>abcd1> abcd0>"
+"(abcd) \1" Z "<0><1>abcd1> abcd0> "
+
+# Case Insensitive back references that hit/don't hit end.
+"(abcd) \1" zi "abcd abc"
+"(abcd) \1" Zi "<0><1>abcd1> ABCD0>"
+"(abcd) \1" Zi "<0><1>abcd1> ABCD0> "
+
+# Back references that hit/don't hit boundary limits.
+
+"(abcd) \1" z "abcd abcd "
+"(abcd) \1" Z "<0><1>abcd1> abcd0> "
+"(abcd) \1" Z "<0><1>abcd1> abcd0> "
+
+"(abcd) \1" zi "abcd abcd "
+"(abcd) \1" Zi "<0><1>abcd1> abcd0> "
+"(abcd) \1" Zi "<0><1>abcd1> abcd0> "
+
+# Back reference that fails match near the end of input without actually hitting the end.
+"(abcd) \1" ZL "abcd abd"
+"(abcd) \1" ZLi "abcd abd"
+
+# Back reference to a zero-length match. They are always a successful match.
+"ab(x?)cd(\1)ef" "<0>ab<1>1>cd<2>2>ef0>"
+"ab(x?)cd(\1)ef" i "<0>ab<1>1>cd<2>2>ef0>"
+
+# Back refs to capture groups that didn't participate in the match.
+"ab(?:(c)|(d))\1" "abde"
+"ab(?:(c)|(d))\1" "<0>ab<1>c1>c0>e"
+"ab(?:(c)|(d))\1" i "abde"
+"ab(?:(c)|(d))\1" i "<0>ab<1>c1>c0>e"
+
+# Named back references
+"(?abcd)\k" "<0><1>abcd1>abcd0>"
+"(no)?(?abcd)\k" "<0><2>abcd2>abcd0>"
+
+"(?...)" E " " # backref names are ascii letters & numbers only"
+"(?<1a>...)" E " " # backref names must begin with a letter"
+"(?.)(?.)" E " " # Repeated names are illegal.
+
+
+# Case Insensitive
+"aBc" i "<0>ABC0>"
+"a[^bc]d" i "ABD"
+'((((((((((a))))))))))\10' i "<0><1><2><3><4><5><6><7><8><9><10>A10>9>8>7>6>5>4>3>2>1>A0>"
+
+"(?:(?i)a)b" "<0>Ab0>"
+"ab(?i)cd" "<0>abCd0>"
+"ab$cd" "abcd"
+
+"ssl" i "abc<0>ßl0>xyz"
+"ssl" i "abc<0>ẞl0>xyz"
+"FIND" i "can <0>find0> ?" # fi ligature, \ufb01
+"find" i "can <0>FIND0> ?"
+"ῧ" i "xxx<0>ῧ0>xxx" # Composed char (match string) decomposes when case-folded (pattern)
+
+# White space handling
+"a b" "ab"
+"abc " "abc"
+"abc " "<0>abc 0>"
+"ab[cd e]z" "<0>ab z0>"
+"ab\ c" "<0>ab c0> "
+"ab c" "<0>ab c0> "
+"ab c" x "ab c "
+"ab\ c" x "<0>ab c0> "
+
+#
+# Pattern Flags
+#
+"(?u)abc" "<0>abc0>"
+"(?-u)abc" "<0>abc0>"
+
+#
+# \c escapes (Control-whatever)
+#
+"\cA" "<0>\u00010>"
+"\ca" "<0>\u00010>"
+"\c\x" "<0>\u001cx0>"
+
+
+#Multi-line mode
+'b\s^' m "a\nb\n"
+"(?m)^abc$" "abc \n abc\n<0>abc0>\nabc"
+"(?m)^abc$" 2 "abc \n abc\nabc\n<0>abc0>"
+"^abc$" 2 "abc \n abc\nabc\nabc"
+
+# Empty and full range
+"[\u0000-\U0010ffff]+" "<0>abc\u0000\uffff\U00010000\U0010ffffzz0>"
+"[^\u0000-\U0010ffff]" "abc\u0000\uffff\U00010000\U0010ffffzz"
+"[^a--a]+" "<0>abc\u0000\uffff\U00010000\U0010ffffzz0>"
+
+# Free-spacing mode
+"a b c # this is a comment" x "<0>abc0> "
+'^a (?#xxx) (?#yyy) {3}c' x "<0>aaac0>"
+"a b c [x y z]" x "abc "
+"a b c [x y z]" x "a b c "
+"a b c [x y z]" x "<0>abcx0>yz"
+"a b c [x y z]" x "<0>abcy0>yz"
+
+#
+# Look Behind
+#
+"(?<=a)b" "a<0>b0>"
+"(.*)(?<=[bc])" "<0><1>abc1>0>d"
+"(?<=(abc))def" "<1>abc1><0>def0>" # lookbehind precedes main match.
+"(?<=ab|abc)xyz" "abwxyz" # ab matches, but not far enough.
+"(?<=abc)cde" "abcde"
+"(?<=abc|ab)cde" "ab<0>cde0>"
+"(?<=abc|ab)cde" "abc<0>cde0>"
+
+"(?<=bc?c?c?)cd" "ab<0>cd0>"
+"(?<=bc?c?c?)cd" "abc<0>cd0>"
+"(?<=bc?c?c?)cd" "abcc<0>cd0>"
+"(?<=bc?c?c?)cd" "abccc<0>cd0>"
+"(?<=bc?c?c?)cd" "abcccccd"
+"(?<=bc?c?c?)c+d" "ab<0>cccccd0>"
+
+".*(?<=: ?)(\w*)" "<0>1:one 2: two 3:<1>three1>0> "
+
+#
+# Named Characters
+#
+"a\N{LATIN SMALL LETTER B}c" "<0>abc0>"
+"a\N{LATIN SMALL LETTER B}c" i "<0>abc0>"
+"a\N{LATIN SMALL LETTER B}c" i "<0>aBc0>"
+"a\N{LATIN SMALL LETTER B}c" "aBc"
+
+"\N{FULL STOP}*" "<0>...0>abc"
+
+"$" "abc<0>0>"
+
+#
+# Optimizations of .* at end of patterns
+#
+"abc.*" "<0>abcdef0>"
+"abc.*$" "<0>abcdef0>"
+"abc(.*)" "<0>abc<1>def1>0>"
+"abc(.*)" "<0>abc<1>1>0>"
+"abc.*" "<0>abc0>\ndef"
+"abc.*" s "<0>abc\ndef0>"
+"abc.*$" s "<0>abc\ndef0>"
+"abc.*$" "abc\ndef"
+"abc.*$" m "<0>abc0>\ndef"
+"abc.*\Z" m "abc\ndef"
+"abc.*\Z" sm "<0>abc\ndef0>"
+
+"abc*" "<0>abccc0>d"
+"abc*$" "<0>abccc0>"
+"ab(?:ab[xyz]\s)*" "<0>ababy abx 0>abc"
+
+"(?:(abc)|a)(?:bc)+" "<0>abc0>"
+"(?:(abc)|a)(?:bc)*" "<0><1>abc1>0>"
+"^[+\-]?[0-9]*\.?[0-9]*" "<0>123.4560>"
+
+"ab.+yz" "<0>abc12345xyz0>ttt"
+"ab.+yz" s "<0>abc12345xyz0>ttt"
+
+"ab.+yz" "abc123\n45xyzttt"
+"ab.+yz" s "<0>abc12\n345xyz0>ttt"
+
+"ab[0-9]+yz" "---abyz+++"
+"ab[0-9]+yz" "---<0>ab1yz0>+++"
+"ab[0-9]+yz" "---<0>ab12yz0>+++"
+"ab[0-9]+yz" "---<0>ab123456yz0>+++"
+
+"ab([0-9]+|[A-Z]+)yz" "---abyz+++"
+"ab([0-9]+|[A-Z]+)yz" "---<0>ab<1>11>yz0>+++"
+"ab([0-9]+|[A-Z]+)yz" "---<0>ab<1>121>yz0>+++"
+"ab([0-9]+|[A-Z]+)yz" "---<0>ab<1>A1>yz0>+++"
+"ab([0-9]+|[A-Z]+)yz" "---<0>ab<1>AB1>yz0>+++"
+"ab([0-9]+|[A-Z]+)yz" "---<0>ab<1>ABCDE1>yz0>+++"
+
+#
+# Hex format \x escaping
+#
+"ab\x63" "<0>abc0>"
+"ab\x09w" "<0>ab\u0009w0>"
+"ab\xabcdc" "<0>ab\u00abcdc0>"
+"ab\x{abcd}c" "<0>ab\uabcdc0>"
+"ab\x{101234}c" "<0>ab\U00101234c0>"
+"abα" "<0>abα0>"
+
+#
+# Octal Escaping. This conforms to Java conventions, not Perl.
+"\0101\00\03\073\0154\01442" "<0>A\u0000\u0003\u003b\u006c\u0064\u00320>"
+"\0776" "<0>\u003f\u00360>" # overflow, the 6 is literal.
+"\0376xyz" "<0>\u00fexyz0>"
+"\08" E "<0>\u000080>"
+"\0" E "x"
+
+#
+# \u Surrogate Pairs
+#
+"\ud800\udc00" "<0>\U000100000>"
+"\ud800\udc00*" "<0>\U00010000\U00010000\U000100000>\U00010001"
+# TODO (Vitess): The next case has invalid UTF-8, so it's not supported right now for testing. It likely works in practice though!
+# "\ud800\ud800\udc00" "<0>\ud800\U000100000>\U00010000\U00010000\U00010001"
+"(\ud800)(\udc00)" "\U00010000"
+"\U00010001+" "<0>\U00010001\U000100010>\udc01"
+
+#
+# hitEnd with find()
+#
+"abc" Z "aa<0>abc0> abcab"
+"abc" 2Z "aaabc <0>abc0>ab"
+"abc" 3z "aa>abc abcab"
+
+#
+# \ escaping
+#
+"abc\jkl" "<0>abcjkl0>" # escape of a non-special letter is just itself.
+"abc[ \j]kl" "<0>abcjkl0>"
+
+#
+# \R all newline sequences.
+#
+"abc\Rxyz" "<0>abc\u000axyz0>gh"
+"abc\Rxyz" "<0>abc\u000bxyz0>gh"
+"abc\Rxyz" "<0>abc\u000cxyz0>gh"
+"abc\Rxyz" "<0>abc\u000dxyz0>gh"
+"abc\Rxyz" "<0>abc\u0085xyz0>gh"
+"abc\Rxyz" "<0>abc\u2028xyz0>gh"
+"abc\Rxyz" "<0>abc\u2029xyz0>gh"
+"abc\Rxyz" "<0>abc\u000d\u000axyz0>gh"
+
+"abc\R\nxyz" "abc\u000d\u000axyzgh" # \R cannot match only the CR from a CR/LF sequence.
+"abc\r\nxyz" "<0>abc\u000d\u000axyz0>gh"
+
+"abc\Rxyz" "abc\u0009xyz" # Assorted non-matches.
+"abc\Rxyz" "abc\u000exyz"
+"abc\Rxyz" "abc\u202axyz"
+
+# \v \V single character new line sequences.
+
+"abc\vxyz" "<0>abc\u000axyz0>gh"
+"abc\vxyz" "<0>abc\u000bxyz0>gh"
+"abc\vxyz" "<0>abc\u000cxyz0>gh"
+"abc\vxyz" "<0>abc\u000dxyz0>gh"
+"abc\vxyz" "<0>abc\u0085xyz0>gh"
+"abc\vxyz" "<0>abc\u2028xyz0>gh"
+"abc\vxyz" "<0>abc\u2029xyz0>gh"
+"abc\vxyz" "abc\u000d\u000axyzgh"
+"abc\vxyz" "abc?xyzgh"
+
+"abc[\v]xyz" "<0>abc\u000axyz0>gh"
+"abc[\v]xyz" "<0>abc\u000bxyz0>gh"
+"abc[\v]xyz" "<0>abc\u000cxyz0>gh"
+"abc[\v]xyz" "<0>abc\u000dxyz0>gh"
+"abc[\v]xyz" "<0>abc\u0085xyz0>gh"
+"abc[\v]xyz" "<0>abc\u2028xyz0>gh"
+"abc[\v]xyz" "<0>abc\u2029xyz0>gh"
+"abc[\v]xyz" "abc\u000d\u000axyzgh"
+"abc[\v]xyz" "abc?xyzgh"
+
+"abc\Vxyz" "abc\u000axyzgh"
+"abc\Vxyz" "abc\u000bxyzgh"
+"abc\Vxyz" "abc\u000cxyzgh"
+"abc\Vxyz" "abc\u000dxyzgh"
+"abc\Vxyz" "abc\u0085xyzgh"
+"abc\Vxyz" "abc\u2028xyzgh"
+"abc\Vxyz" "abc\u2029xyzgh"
+"abc\Vxyz" "abc\u000d\u000axyzgh"
+"abc\Vxyz" "<0>abc?xyz0>gh"
+
+# \h \H horizontal white space. Defined as gc=space_separator plus ascii tab
+
+"abc\hxyz" "<0>abc xyz0>gh"
+"abc\Hxyz" "abc xyzgh"
+"abc\hxyz" "<0>abc\u2003xyz0>gh"
+"abc\Hxyz" "abc\u2003xyzgh"
+"abc\hxyz" "<0>abc\u0009xyz0>gh"
+"abc\Hxyz" "abc\u0009xyzgh"
+"abc\hxyz" "abc?xyzgh"
+"abc\Hxyz" "<0>abc?xyz0>gh"
+
+"abc[\h]xyz" "<0>abc xyz0>gh"
+"abc[\H]xyz" "abc xyzgh"
+"abc[\h]xyz" "<0>abc\u2003xyz0>gh"
+"abc[\H]xyz" "abc\u2003xyzgh"
+"abc[\h]xyz" "<0>abc\u0009xyz0>gh"
+"abc[\H]xyz" "abc\u0009xyzgh"
+"abc[\h]xyz" "abc?xyzgh"
+"abc[\H]xyz" "<0>abc?xyz0>gh"
+
+
+#
+# Bug xxxx
+#
+"(?:\-|(\-?\d+\d\d\d))?(?:\-|\-(\d\d))?(?:\-|\-(\d\d))?(T)?(?:(\d\d):(\d\d):(\d\d)(\.\d+)?)?(?:(?:((?:\+|\-)\d\d):(\d\d))|(Z))?" MG "<0>-1234-21-31T41:51:61.789+71:810>"
+
+
+#
+# A random, complex, meaningless pattern that should at least compile
+#
+"(?![^\\G)(?![^|\]\070\ne\{\t\[\053\?\\\x51\a\075\0023-\[&&[|\022-\xEA\00-\u41C2&&[^|a-\xCC&&[^\037\uECB3\u3D9A\x31\|\[^\016\r\{\,\uA29D\034\02[\02-\[|\t\056\uF599\x62\e\<\032\uF0AC\0026\0205Q\|\\\06\0164[|\057-\u7A98&&[\061-g|\|\0276\n\042\011\e\xE8\x64B\04\u6D0EDW^\p{Lower}]]]]?)(?<=[^\n\\\t\u8E13\,\0114\u656E\xA5\]&&[\03-\026|\uF39D\01\{i\u3BC2\u14FE]])(?<=[^|\uAE62\054H\|\}&&^\p{Space}])(?sxx)(?<=[\f\006\a\r\xB4]{1,5})|(?x-xd:^{5}+)()" "<0>0>abc"
+
+
+#
+# Bug 3225
+
+"1|9" "<0>10>"
+"1|9" "<0>90>"
+"1*|9" "<0>10>"
+"1*|9" "<0>0>9"
+
+"(?:a|ac)d" "<0>acd0>"
+"a|ac" "<0>a0>c"
+
+#
+# Bug 3320
+#
+"(a([^ ]+)){0,} (c)" "<0><1>a<2>b2>1> <3>c3>0> "
+"(a([^ ]+))* (c)" "<0><1>a<2>b2>1> <3>c3>0> "
+
+#
+# Bug 3436
+#
+"(.*?) *$" "<0><1>test1> 0>"
+
+#
+# Bug 4034
+#
+"\D" "<0>A0>BC\u00ffDEF"
+"\d" "ABC\u00ffDEF"
+"\D" "<0>\u00ff0>DEF"
+"\d" "\u00ffDEF"
+"\D" "123<0>\u00ff0>DEF"
+"\D" "<0>\u01000>DEF"
+"\D" "123<0>\u01000>DEF"
+
+#
+#bug 4024, new line sequence handling
+#
+"(?m)^" "<0>0>AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a"
+"(?m)^" 2 "AA\u000d\u000a<0>0>BB\u000d\u000aCC\u000d\u000a"
+"(?m)^" 3 "AA\u000d\u000aBB\u000d\u000a<0>0>CC\u000d\u000a"
+"(?m)^" 4 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a"
+
+"(?m)$" "AA<0>0>\u000d\u000aBB\u000d\u000aCC\u000d\u000a"
+"(?m)$" 2 "AA\u000d\u000aBB<0>0>\u000d\u000aCC\u000d\u000a"
+"(?m)$" 3 "AA\u000d\u000aBB\u000d\u000aCC<0>0>\u000d\u000a"
+"(?m)$" 4 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a<0>0>"
+"(?m)$" 5 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a"
+
+"$" "AA\u000d\u000aBB\u000d\u000aCC<0>0>\u000d\u000a"
+"$" 2 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a<0>0>"
+"$" 3 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a"
+
+"$" "\u000a\u0000a<0>0>\u000a"
+"$" 2 "\u000a\u0000a\u000a<0>0>"
+"$" 3 "\u000a\u0000a\u000a"
+
+"$" "<0>0>"
+"$" 2 ""
+
+"$" "<0>0>\u000a"
+"$" 2 "\u000a<0>0>"
+"$" 3 "\u000a"
+
+"^" "<0>0>"
+"^" 2 ""
+
+"\Z" "<0>0>"
+"\Z" 2 ""
+"\Z" 2 "\u000a<0>0>"
+"\Z" "<0>0>\u000d\u000a"
+"\Z" 2 "\u000d\u000a<0>0>"
+
+
+# No matching ^ at interior new-lines if not in multi-line mode.
+"^" "<0>0>AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a"
+"^" 2 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a"
+
+#
+# Dot-matches-any mode, and stopping at new-lines if off.
+#
+"." "<0>10>23\u000aXYZ"
+"." 2 "1<0>20>3\u000aXYZ"
+"." 3 "12<0>30>\u000aXYZ"
+"." 4 "123\u000a<0>X0>YZ" # . doesn't match newlines
+"." 4 "123\u000b<0>X0>YZ"
+"." 4 "123\u000c<0>X0>YZ"
+"." 4 "123\u000d<0>X0>YZ"
+"." 4 "123\u000d\u000a<0>X0>YZ"
+"." 4 "123\u0085<0>X0>YZ"
+"." 4 "123\u2028<0>X0>YZ"
+"." 4 "123\u2029<0>X0>YZ"
+"." 4s "123<0>\u000a0>XYZ" # . matches any
+"." 4s "123<0>\u000b0>XYZ"
+"." 4s "123<0>\u000c0>XYZ"
+"." 4s "123<0>\u000d0>XYZ"
+"." 4s "123<0>\u000d\u000a0>XYZ"
+"." 4s "123<0>\u00850>XYZ"
+"." 4s "123<0>\u20280>XYZ"
+"." 4s "123<0>\u20290>XYZ"
+".{6}" "123\u000a\u000dXYZ"
+".{6}" s "<0>123\u000a\u000dX0>Y"
+
+
+#
+# Ranges
+#
+".*" "abc<0>def0>ghi"
+"a" "aaa<0>a0>aaaaa"
+"a" 2 "aaaa<0>a0>aaaa"
+"a" 3 "aaaaa<0>a0>aaa"
+"a" 4 "aaaaaaaaa"
+"a" "aaa<0>a0>aaaaa"
+
+#
+# [set] parsing, systematically run through all of the parser states.
+#
+#
+"[def]+" "abc<0>ddeeff0>ghi" # set-open
+"[^def]+" "<0>abc0>defghi"
+"[:digit:]+" "abc<0>1230>def"
+"[:^digit:]+" "<0>abc0>123def"
+"[\u005edef]+" "abc<0>de^f0>ghi"
+
+"[]]+" "abc<0>]]]0>[def" # set-open2
+"[^]]+" "<0>abc0>]]][def"
+
+"[:Lu:]+" "abc<0>ABC0>def" # set-posix
+"[:Lu]+" "abc<0>uL::Lu0>"
+"[:^Lu]+" "abc<0>uL:^:Lu0>"
+"[:]+" "abc<0>:::0>def"
+"[:whats this:]" E " "
+"[--]+" dE "-------"
+
+"[[nested]]+" "xyz[<0>nnetsteed0>]abc" #set-start
+"[\x{41}]+" "CB<0>AA0>ZYX"
+"[\[\]\\]+" "&*<0>[]\\0>..."
+"[*({<]+" "^&<0>{{(<<*0>)))"
+
+
+"[-def]+" "abc<0>def-ef-d0>xyz" # set-start-dash
+"[abc[--def]]" E " "
+
+"[x[&def]]+" "abc<0>def&0>ghi" # set-start-amp
+"[&& is bad at start]" E " "
+
+"[abc" E " " # set-after-lit
+"[def]]" "abcdef"
+"[def]]" "abcde<0>f]0>]"
+
+"[[def][ghi]]+" "abc]<0>defghi0>[xyz" # set-after-set
+"[[def]ghi]+" "abc]<0>defghi0>[xyz"
+"[[[[[[[[[[[abc]" E " "
+"[[abc]\p{Lu}]+" "def<0>abcABC0>xyz"
+
+"[d-f]+" "abc<0>def0>ghi" # set-after-range
+"[d-f[x-z]]+" "abc<0>defxyzzz0>gw"
+"[\s\d]+" "abc<0> 1230>def"
+"[d-f\d]+" "abc<0>def1230>ghi"
+"[d-fr-t]+" "abc<0>defrst0>uvw"
+
+"[abc--]" E " " # set-after-op
+"[[def]&&]" E " "
+"[-abcd---]+" "<0>abc0>--" #[-abcd]--[-]
+"[&abcd&&&ac]+" "b<0>ac&&ca0>d" #[&abcd]&&[&ac]
+
+"[[abcd]&[ac]]+" "b<0>acac0>d" # set-set-amp
+"[[abcd]&&[ac]]+" "b<0>acac0>d"
+"[[abcd]&&ac]+" "b<0>acac0>d"
+"[[abcd]&ac]+" "<0>bacacd&&&0>"
+
+"[abcd&[ac]]+" "<0>bacacd&&&0>" #set-lit-amp
+"[abcd&&[ac]]+" "b<0>acac0>d"
+"[abcd&&ac]+" "b<0>acac0>d"
+
+"[[abcd]-[ac]]+" "a<0>bdbd0>c" # set-set-dash
+"[[abcd]--[ac]]+" "a<0>bdbd0>c"
+"[[abcd]--ac]+" "a<0>bdbd0>c"
+"[[abcd]-ac]+" "<0>bacacd---0>"
+
+"[a-d--[b-c]]+" "b<0>adad0>c" # set-range-dash
+"[a-d--b-c]+" "b<0>adad0>c"
+"[a-d-[b-c]]+" "<0>bad-adc0>"
+"[a-d-b-c]+" "<0>bad-adc0>"
+"[\w--[b-c]]+" "b<0>adad0>c"
+"[\w--b-c]+" "b<0>adad0>c"
+"[\w-[b-c]]+" "<0>bad-adc0>"
+"[\w-b-c]+" "<0>bad-adc0>"
+
+"[a-d&&[b-c]]+" "a<0>bcbc0>d" # set-range-amp
+"[a-d&&b-c]+" "a<0>bcbc0>d"
+"[a-d&[b-c]]+" "<0>abc&bcd0>"
+"[a-d&b-c]+" "<0>abc&bcd0>"
+
+"[abcd--bc]+" "b<0>adda0>c" # set-lit-dash
+"[abcd--[bc]]+" "b<0>adda0>c"
+"[abcd-[bc]]+" "<0>bad--dac0>xyz"
+"[abcd-]+" "<0>bad--dac0>xyz"
+
+"[abcd-\s]+" E "xyz<0>abcd --0>xyz" # set-lit-dash-esc
+"[abcd-\N{LATIN SMALL LETTER G}]+" "xyz-<0>abcdefg0>hij-"
+"[bcd-\{]+" "a<0>bcdefyz{0>|}"
+
+"[\p{Ll}]+" "ABC<0>abc0>^&*&" # set-escape
+"[\P{Ll}]+" "abc<0>ABC^&*&0>xyz"
+"[\N{LATIN SMALL LETTER Q}]+" "mnop<0>qqq0>rst"
+"[\sa]+" "cb<0>a a 0>(*&"
+"[\S]+" " <0>hello0> "
+"[\w]+" " <0>hello_world0>! "
+"[\W]+" "a<0> *$%#,0>hello "
+"[\d]+" "abc<0>1230>def"
+"[\D]+" "123<0>abc0>567"
+"[\$\#]+" "123<0>$#$#0>\\"
+
+#
+# Try each of the Java compatibility properties.
+# These are checked here, while normal Unicode properties aren't, because
+# these Java compatibility properties are implemented directly by regexp, while other
+# properties are handled by ICU's Property and UnicodeSet APIs.
+#
+# These tests are only to verify that the names are recognized and the
+# implementation isn't dead. They are not intended to verify that the
+# function definitions are 100% correct.
+#
+"[:InBasic Latin:]+" "ΓΔΕΖΗΘ<0>hello, world.0>ニヌネノハバパ"
+"[:^InBasic Latin:]+" "<0>ΓΔΕΖΗΘ0>hello, world.ニヌネノハバパ"
+"\p{InBasicLatin}+" "ΓΔΕΖΗΘ<0>hello, world.0>ニヌネノハバパ"
+"\P{InBasicLatin}+" "<0>ΓΔΕΖΗΘ0>hello, world.ニヌネノハバパ"
+"\p{InGreek}+" "<0>ΓΔΕΖΗΘ0>hello, world.ニヌネノハバパ"
+"\p{InCombining Marks for Symbols}" "<0>\u20d00>"
+"\p{Incombiningmarksforsymbols}" "<0>\u20d00>"
+
+
+"\p{javaDefined}+" "\uffff<0>abcd0>\U00045678"
+"\p{javaDigit}+" "abc<0>12340>xyz"
+"\p{javaIdentifierIgnorable}+" "abc<0>\u0000\u000e\u009f0>xyz"
+"\p{javaISOControl}+" "abc<0>\u0000\u000d\u00830>xyz"
+"\p{javaJavaIdentifierPart}+" "#@!<0>abc123_$0>;"
+"\p{javaJavaIdentifierStart}+" "123\u0301<0>abc$_0>%^&"
+"\p{javaLetter}+" "123<0>abcDEF0>&*()("
+"\p{javaLetterOrDigit}+" "$%^&*<0>123abcகஙசஜஞ0>☺♘♚☔☎♬⚄⚡"
+"\p{javaLowerCase}+" "ABC<0>def0>&^%#:="
+"\p{javaMirrored}+" "ab$%<0>(){}[]0>xyz"
+"\p{javaSpaceChar}+" "abc<0> \u00a0\u20280>!@#"
+"\p{javaSupplementaryCodePoint}+" "abc\uffff<0>\U00010000\U0010ffff0>\u0000"
+"\p{javaTitleCase}+" "abCE<0>Džῌᾨ0>123"
+"\p{javaUnicodeIdentifierStart}+" "123<0>abcⅣ0>%^&&*"
+"\p{javaUnicodeIdentifierPart}+" "%&&^<0>abc123\u0301\u00020>..."
+"\p{javaUpperCase}+" "abc<0>ABC0>123"
+"\p{javaValidCodePoint}+" "<0>\u0000abc\ud800 unpaired \udfff |\U0010ffff0>"
+"\p{javaWhitespace}+" "abc\u00a0\u2007\u202f<0> \u0009\u001c\u001f\u20280>42"
+"\p{all}+" "<0>123\u0000\U0010ffff0>"
+"\P{all}+" "123\u0000\U0010ffff"
+
+# [:word:] is implemented directly by regexp. Not a java compat property, but PCRE and others.
+
+"[:word:]+" ".??$<0>abc123ΓΔΕΖΗ_0>%%%"
+"\P{WORD}+" "<0>.??$0>abc123ΓΔΕΖΗ_%%%"
+
+#
+# Errors on unrecognized ASCII letter escape sequences.
+#
+"[abc\Y]+" "<0>abcY0>"
+"[abc\Y]+" eE "<0>abcY0>"
+
+"(?:a|b|c|\Y)+" "<0>abcY0>"
+"(?:a|b|c|\Y)+" eE "<0>abcY0>"
+
+"\Q\Y\E" e "<0>\\Y0>"
+
+#
+# Reported problem
+#
+"[a-\w]" E "x"
+
+#
+# Bug 4045
+#
+"A*" "<0>AAAA0>"
+"A*" 2 "AAAA<0>0>"
+"A*" 3 "AAAA"
+"A*" 4 "AAAA"
+"A*" 5 "AAAA"
+"A*" 6 "AAAA"
+"A*" "<0>0>"
+"A*" 2 ""
+"A*" 3 ""
+"A*" 4 ""
+"A*" 5 ""
+
+#
+# Bug 4046
+#
+"(?m)^" "<0>0>AA\u000dBB\u000dCC\u000d"
+"(?m)^" 2 "AA\u000d<0>0>BB\u000dCC\u000d"
+"(?m)^" 3 "AA\u000dBB\u000d<0>0>CC\u000d"
+"(?m)^" 4 "AA\u000dBB\u000dCC\u000d"
+"(?m)^" 5 "AA\u000dBB\u000dCC\u000d"
+"(?m)^" 6 "AA\u000dBB\u000dCC\u000d"
+
+"(?m)^" "<0>0>AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a"
+"(?m)^" 2 "AA\u000d\u000a<0>0>BB\u000d\u000aCC\u000d\u000a"
+"(?m)^" 3 "AA\u000d\u000aBB\u000d\u000a<0>0>CC\u000d\u000a"
+"(?m)^" 4 "AA\u000d\u000aBB\u000d\u000aCC\u000d\u000a"
+
+#
+# Bug 4059
+#
+"\w+" "<0>イチロー0>"
+"\b....\b." "<0>イチロー?0>"
+
+
+#
+# Bug 4058 ICU Unicode Set patterns have an odd feature -
+# A $ as the last character before the close bracket means match
+# a \uffff, which means off the end of the string in transliterators.
+# Didn't make sense for regular expressions, and is now fixed.
+#
+"[\$](P|C|D);" "<0>$<1>P1>;0>"
+"[$](P|C|D);" "<0>$<1>P1>;0>"
+"[$$](P|C|D);" "<0>$<1>P1>;0>"
+
+#
+# bug 4888 Flag settings lost in some cases.
+#
+"((a){2})|(#)" is "no"
+"((a){2})|(#)" is "<0><1>a<2>a2>1>0>#"
+"((a){2})|(#)" is "a<0><3>#3>0>"
+
+"((a|b){2})|c" is "<0>c0>"
+"((a|b){2})|c" is "<0>C0>"
+"((a|b){2})|c" s "C"
+
+#
+# bug 5617 ZWJ \u200d shouldn't cause word boundaries
+#
+".+?\b" "<0> 0>\u0935\u0915\u094D\u200D\u0924\u0947 "
+".+?\b" 2 " <0>\u0935\u0915\u094D\u200D\u0924\u09470> "
+".+?\b" 3 " \u0935\u0915\u094D\u200D\u0924\u0947 "
+
+#
+# bug 5386 "^.*$" should match empty input
+#
+"^.*$" "<0>0>"
+"^.*$" m "<0>0>"
+"^.*$" "<0>0>\n"
+"(?s)^.*$" "<0>\n0>"
+
+#
+# bug 5386 Empty pattern and empty input should match.
+#
+"" "<0>0>abc"
+"" "<0>0>"
+
+#
+# bug 5386 Range upper and lower bounds can be equal
+#
+"[a-a]" "<0>a0>"
+
+#
+# bug 5386 $* should not fail, should match empty string.
+#
+"$*" "<0>0>abc"
+
+#
+# bug 5386 \Q ... \E escaping problem
+#
+"[a-z\Q-$\E]+" "QE<0>abc-def$0>."
+
+# More reported 5386 Java compatibility failures
+#
+"[^]*abb]*" "<0>kkkk0>"
+"\xa" "huh" # Java would like to be warned.
+"^.*$" "<0>0>"
+
+#
+# bug 5386 Empty left alternation should produce a zero length match.
+#
+"|a" "<0>0>a"
+"$|ab" "<0>ab0>"
+"$|ba" "ab<0>0>"
+
+#
+# bug 5386 Java compatibility for set expressions
+#
+"[a-z&&[cde]]+" "ab<0>cde0>fg"
+
+#
+# bug 6019 matches() needs to backtrack and check for a longer match if the
+# first match(es) found don't match the entire input.
+#
+"a?|b" "<0>0>b"
+"a?|b" M "<0>b0>"
+"a?|.*?u|stuff|d" M "<0>stuff0>"
+"a?|.*?(u)|stuff|d" M "<0>stuff<1>u1>0>"
+"a+?" "<0>a0>aaaaaaaaaaaa"
+"a+?" M "<0>aaaaaaaaaaaaa0>"
+
+#
+# Bug 7724. Expression to validate zip codes.
+#
+"(?!0{5})(\d{5})(?!-?0{4})(-?\d{4})?" "<0><1>940401><2>-33442>0>"
+"(?!0{5})(\d{5})(?!-?0{4})(-?\d{4})?" "94040-0000"
+"(?!0{5})(\d{5})(?!-?0{4})(-?\d{4})?" "00000-3344"
+
+#
+# Bug 8666. Assertion failure on match, bad operand to JMP_SAV_X opcode.
+#
+"((.??)+|A)*" "<0><1><2>2>1>0>AAAAABBBBBCCCCCDDDDEEEEE"
+
+#
+# Bug 8826. Incorrect results with case insensitive matches.
+#
+"AS(X)" i "aßx"
+"AS.*" i "aßx" # Expansion of sharp s can't split between pattern terms.
+"ASßS" i "<0>aßß0>" # All one literal string, does match.
+"ASß{1}S" i "aßß" # Pattern with terms, no match.
+"aßx" i "<0>assx0>"
+"aßx" i "<0>ASSX0>"
+"aßx" i "<0>aßx0>"
+"ASS(.)" i "<0>aß<1>x1>0>"
+
+# Case Insensitive, probe some corner cases.
+"ass+" i "aß" # Second 's' in pattern is qualified, can't combine with first.
+"as+" i "aß"
+"aßs" i "as" # Can't match half of a ß
+"aß+" i "<0>assssssss0>s"
+"aß+" i "<0>assßSssSSS0>s"
+"a(ß?)+" i "<0>assssssss<1>1>0>s"
+"a(ß?)+" i "<0>a<1>1>0>zzzzzzzzs"
+
+"\U00010400" i "<0>\U000104280>" # case folded supplemental code point.
+
+"sstuff" i "<0>ßtuff0>" # exercise optimizations on what chars can start a match.
+"sstuff" i "s<0>ßtuff0>" # exercise optimizations on what chars can start a match.
+"ßtuff" i "s<0>sstuff0>"
+"ßtuff" i "s<0>Sstuff0>"
+
+"a(..)\1" i "<0>A<1>bc1>BC0>def"
+"(ß)\1" i "aa<0><1>ss1>ß0>zz" # Case insensitive back reference
+"..(.)\1" i "<0>aa<1>ß1>ss0>"
+"ab(..)\1" i "xx<0>ab<1>ss1>ß0>ss"
+
+" (ss) ((\1.*)|(.*))" i "<0> <1>ss1> <2><4>sß4>2>0>" # The back reference 'ss' must not match in 'sß'
+
+# Bug 9057
+# \u200c and \u200d should be word characters.
+#
+"\w+" " <0>abc\u200cdef\u200dghi0> "
+"\w+" i " <0>abc\u200cdef\u200dghi0> "
+"[\w]+" " <0>abc\u200cdef\u200dghi0> "
+"[\w]+" i " <0>abc\u200cdef\u200dghi0> "
+
+# Bug 9283
+# uregex_open fails for look-behind assertion + case-insensitive
+
+"(ab)?(?<=ab)cd|ef" i "<0><1>ab1>cd0>"
+
+# Bug 9719 Loop breaking on (zero length match){3,} (unlimited upper bound).
+#
+
+"(?:abc){1,}abc" "<0>abcabcabcabcabc0>"
+"(?:2*){2,}?a2\z" "<0>2a20>"
+"(?:2*){2,}?a2\z" "2a3"
+"(?:x?+){3,}+yz" "w<0>yz0>"
+"(2*){2,}?a2\\z" "2a3"
+"(2*){2,}?a2\\z" "<0>2<1>1>a2\\z0>"
+"(2*){2,}?a2\z" "<0>2<1>1>a20>"
+
+
+# Bug 10024
+# Incorrect (unbounded) longest match length with {1, 20} style quantifiers.
+# Unbounded match is disallowed in look-behind expressions.
+# Max match length is used to limit where to check for look-behind matches.
+
+"(?<=a{1,5})bc" "aaaa<0>bc0>def"
+"(?<=(?:aa){3,20})bc" "aaaaaa<0>bc0>def"
+"(?jkl0>"
+"(?<=a{11})bc" "aaaaaaaaaaa<0>bc0>"
+"(?<=a{11})bc" "aaaaaaaaaabc"
+"(?<=a{1,})bc" E "aaaa<0>bc0>def" # U_REGEX_LOOK_BEHIND_LIMIT error.
+"(?<=(?:){11})bc" "<0>bc0>" # Empty (?:) expression.
+
+# Bug 10835
+# Match Start Set not being correctly computed for case insensitive patterns.
+# (Test here is to dump the compiled pattern & manually check the start set.)
+
+"(private|secret|confidential|classified|restricted)" i "hmm, <0><1>Classified1>0> stuff"
+"(private|secret|confidential|classified|restricted)" "hmm, Classified stuff"
+
+# Bug 10844
+
+"^([\w\d:]+)$" "<0><1>DiesIst1Beispiel:text1>0>"
+"^([\w\d:]+)$" i "<0><1>DiesIst1Beispiel:text1>0>"
+"^(\w+\d\w+:\w+)$" "<0><1>DiesIst1Beispiel:text1>0>"
+"^(\w+\d\w+:\w+)$" i "<0><1>DiesIst1Beispiel:text1>0>"
+
+# Bug 11049
+# Edge cases in find() when pattern match begins with set of code points
+# and the match begins at the end of the string.
+
+"A|B|C" "hello <0>A0>"
+"A|B|C" "hello \U00011234"
+"A|B|\U00012345" "hello <0>\U000123450>"
+"A|B|\U00010000" "hello \ud800"
+
+# Bug 11369
+# Incorrect optimization of patterns with a zero length quantifier {0}
+
+"(.|b)(|b){0}\$(?#xxx){3}(?>\D*)" "AAAAABBBBBCCCCCDDDDEEEEE"
+"(|b)ab(c)" "<0><1>1>ab<2>c2>0>"
+"(|b){0}a{3}(D*)" "<0>aaa<2>2>0>"
+"(|b){0,1}a{3}(D*)" "<0><1>1>aaa<2>2>0>"
+"((|b){0})a{3}(D*)" "<0><1>1>aaa<3>3>0>"
+
+# Bug 11370
+# Max match length computation of look-behind expression gives result that is too big to fit in the
+# in the 24 bit operand portion of the compiled code. Expressions should fail to compile
+# (Look-behind match length must be bounded. This case is treated as unbounded, an error.)
+
+"(?pre<1>\ud8001>post\ud8000> fin"
+"pre(.)post\1" i "pre\ud800post\ud800\udc00" # case insensitive backrefs take a different code path
+"pre(.)post\1" i "<0>pre<1>\ud8001>post\ud8000> fin"
+
+# Bug 11554
+#
+# Maximum match length computation was assuming UTF-16.
+# Used in look-behind matches to constrain how far back to look.
+
+"(?<=a\x{100000})spam" "***a\x{100000}<0>spam0>**"
+"(?<=aą)spam" "**aą<0>spam0>**"
+"(?<=ąabc)spam" "**ąabc<0>spam0>**"
+
+"(?<=a\x{100000})spam" "***a\x{100001}spam**"
+"(?<=aą)spam" "**bąspam**"
+"(?<=ąabc)spam" "**ąabxspam**"
+
+# with negative look-behind
+
+"(?spam0>**"
+"(?spam0>**"
+"(?spam0>**"
+
+# Bug #12930
+#
+# Minimum Match Length computation, int32_t overflow on an empty set in the pattern.
+# The empty set, with no match possible, has a min match length of INT32_MAX.
+# Was incremented subsequently. Caused assertion failure on pattern compile.
+
+"[^\u0000-\U0010ffff]bc?" "bc no match"
+"[^\u0000-\U0010ffff]?bc?" "<0>bc0> has a match"
+
+# Bug #12160 Hit End behavior after find fails to find.
+# To match Java, should be true if find fails to find.
+#
+"abc" Z "<0>abc0> abc abc xyz"
+"abc" Z2 "abc <0>abc0> abc xyz"
+"abc" Z3 "abc abc <0>abc0> xyz"
+"abc" z4 "abc abc abc xyz"
+
+# Bug #13844 Verify that non-standard Java property names are recognized.
+"[\p{IsAlphabetic}]" " <0>A0>"
+"[\P{IsAlphabetic}]" "A<0> 0>"
+"[\p{IsIdeographic}]" "A<0>〆0>"
+"[\P{IsIdeographic}]" "〆<0>A0>"
+"[\p{IsLetter}]" " <0>A0>"
+"[\P{IsLetter}]" "A<0> 0>"
+"[\p{Letter}]" " <0>A0>"
+"[\p{IsLowercase}]" "A<0>a0>"
+"[\P{IsLowercase}]" "a<0>A0>"
+"[\p{IsUppercase}]" "a<0>A0>"
+"[\P{IsUppercase}]" "A<0>a0>"
+"[\p{IsTitlecase}]" "D<0>Dz0>"
+"[\P{IsTitlecase}]" "Dz<0>D0>"
+"[\p{IsPunctuation}]" " <0>&0>"
+"[\P{IsPunctuation}]" "&<0> 0>"
+"[\p{IsControl}]" " <0>\x{82}0>"
+"[\P{IsControl}]" "\x{82}<0> 0>"
+"[\p{IsWhite_Space}]" "x<0> 0>"
+"[\P{IsWhite_Space}]" " <0>x0>"
+"[\p{IsDigit}]" " <0>40>"
+"[\P{IsDigit}]" "4<0> 0>"
+"[\p{IsHex_Digit}]" " <0>F0>"
+"[\P{IsHex_Digit}]" "F<0> 0>"
+"[\p{IsJoin_Control}]" " <0>\x{200d}0>"
+"[\P{IsJoin_Control}]" "\x{200d}<0> 0>"
+"[\p{IsNoncharacter_Code_Point}]" "A<0>\x{5fffe}0>"
+"[\p{IsAssigned}]" "\x{10ffff}<0>a0>"
+"[\P{IsAssigned}]" "a<0>\x{10ffff}0>"
+
+"[\p{InBasic Latin}]" "〆<0>A0>"
+"[\p{InBasicLatin}]" "〆<0>A0>"
+"[\p{InBasic-Latin}]" "〆<0>A0>" # ICU accepts '-'; Java does not.
+"[\p{InBasic_Latin}]" "〆<0>A0>"
+"[\p{Inbasiclatin}]" "〆<0>A0>"
+"[\p{inbasiclatin}]" E "〆<0>A0>" # "In" must be cased as shown. Property name part is case insensitive.
+"[\p{InCombining_Marks_for_Symbols}]" "a<0>\x{20DD}0>" # COMBINING ENCLOSING CIRCLE
+
+"[\p{all}]*" "<0>\x{00}abc\x{10ffff}0>"
+"[\p{javaBadProperty}]" E "whatever"
+"[\p{IsBadProperty}]" E "whatever"
+"[\p{InBadBlock}]" E "whatever"
+"[\p{In}]" E "whatever"
+"[\p{Is}]" E "whatever"
+"[\p{java}]" "x<0>ꦉ0>" # Note: "java" is a valid script code.
+
+"[\p{javaLowerCase}]+" "A<0>a0>"
+"[\p{javaLowerCase}]+" i "<0>Aa0>"
+"[\P{javaLowerCase}]+" "<0>A0>a"
+"[\P{javaLowerCase}]+" i "Aa" # No Match because case fold of the set happens first, then negation.
+ # JDK is not case insensitive w named properties, even though
+ # the insensitive match flag is set. A JDK bug?
+
+"[a-z]+" i "<0>Aa0>" # Matches JDK behavior.
+"[^a-z]+" i "Aa" # (no match) which is JDK behavior. Case fold first, then negation.
+
+# Bug 20385. Assertion failure while compiling a negative look-behind expression consisting of a set with
+# no contents. Meaning the [set] can never match. There is no syntax to directly express
+# an empty set, so generate it by negating (^) a set of all code points.
+# Also check empty sets in other contexts.
+
+"(?0>abc"
+
+"(?0>abc"
+"x(?x0>abc"
+"x(?x0>abc"
+"x(?x0>abc"
+
+"[^\u0000-\U0010ffff]" "a"
+"[^[^\u0000-\U0010ffff]]" "<0>a0>"
+
+"This is a string with (?:one |two |three )endings" "<0>This is a string with two endings0>"
+
+# Bug ICU-20544. Similar to 20385, above. Assertion failure with a negative look-behind assertion containing
+# a set with no contents. Look-behind pattern includes more than just the empty set.
+
+"(?0>abc" # note: first 'ⰿ' is \u2c3f, hence empty set.
+"(?0>abc"
+"(?<=[^[^]]†)" "abc" # Problem also exists w positive look-behind
+
+# Bug ICU-20391. Crash in computation of minimum match length with nested look-around patterns.
+#
+"(?<=(?<=((?=)){0}+)" E "aaa"
+"(?<=(?<=((?=)){0}+))" "<0>0>"
+"(?<=c(?<=b((?=a)){1}+))" "aaa"
+"abc(?=de(?=f))...g" "<0>abcdefg0>"
+"abc(?=de(?=f))...g" "abcdxfg"
+
+# Bug ICU-20618 Assertion failure with nested look-around expressions.
+#
+"(?<=(?<=b?(?=a)))" "hello, world."
+
+# Bug ICU-20939
+# Incorrect word \b boundaries w UTF-8 input and non-ASCII text
+#
+"(?w)\b" v2 "äää<0>0> äää"
+
+# Bug ICU-21492 Assertion failure with nested look-around expressions.
+#
+"(?<=(?:(?<=(?:(?<=(?:(?<=)){2})){3})){4}" E "<0>0>" # orig failure from bug report, w mismatched parens.
+"(?:(?<=(?:(?<=)){2}))" "<0>0>" # Simplified case, with a valid pattern.
+
+# Random debugging, Temporary
+#
+
+#
+# Regexps from http://www.regexlib.com
+#
+"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" G "<0>G1 1AA0>"
+"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" G "<0>EH10 2QQ0>"
+"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" G "<0>SW1 1ZZ0>"
+"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" "G111 1AA"
+"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" "X10 WW"
+"^[a-zA-Z]{1,2}[0-9][0-9A-Za-z]{0,1} {0,1}[0-9][A-Za-z]{2}$" "DDD 5WW"
+#"^[\w\-]+(?:\.[\w\-]+)*@(?:[\w\-]+\.)+[a-zA-Z]{2,7}$" dG "<0>joe.tillis@unit.army.mil0>" # TODO: \w in pattern
+#"^[\w-]+(?:\.[\w-]+)*@(?:[\w-]+\.)+[a-zA-Z]{2,7}$" G "<0>jack_rabbit@slims.com0>" # TODO: \w in pattern
+#"^[\w-]+(?:\.[\w-]+)*@(?:[\w-]+\.)+[a-zA-Z]{2,7}$" G "<0>foo99@foo.co.uk0>" # TODO: \w in pattern
+#"^[\w-]+(?:\.[\w-]+)*@(?:[\w-]+\.)+[a-zA-Z]{2,7}$" "find_the_mistake.@foo.org" # TODO: \w in pattern
+#"^[\w-]+(?:\.[\w-]+)*@(?:[\w-]+\.)+[a-zA-Z]{2,7}$" ".prefix.@some.net"
+"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" G "<0>asmith@mactec.com0>"
+"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" G "<0>foo12@foo.edu0>"
+"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" G "<0>bob.smith@foo.tv0>"
+"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" "joe"
+"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" "@foo.com"
+"^([a-zA-Z0-9_\-\.]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?)$" "a@a"
+"^\d{1,2}\/\d{1,2}\/\d{4}$" G "<0>4/1/20010>"
+"^\d{1,2}\/\d{1,2}\/\d{4}$" G "<0>12/12/20010>"
+"^\d{1,2}\/\d{1,2}\/\d{4}$" G "<0>55/5/34340>"
+"^\d{1,2}\/\d{1,2}\/\d{4}$" "1/1/01"
+"^\d{1,2}\/\d{1,2}\/\d{4}$" "12 Jan 01"
+"^\d{1,2}\/\d{1,2}\/\d{4}$" "1-1-2001"
+"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>01.1.020>"
+"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>11-30-20010>"
+"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>2/29/20000>"
+"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "02/29/01"
+"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "13/01/2002"
+"^(?:(?:(?:0?[13578]|1[02])(\/|-|\.)31)\1|(?:(?:0?[1,3-9]|1[0-2])(\/|-|\.)(?:29|30)\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:0?2(\/|-|\.)29\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:(?:0?[1-9])|(?:1[0-2]))(\/|-|\.)(?:0?[1-9]|1\d|2[0-8])\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "11/00/02"
+"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" G "<0>127.0.0.10>"
+"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" G "<0>255.255.255.00>"
+"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" G "<0>192.168.0.10>"
+"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" "1200.5.4.3"
+"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" "abc.def.ghi.jkl"
+"^(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])$" "255.foo.bar.1"
+"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" G "<0>COM10>"
+"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" G "<0>AUX0>"
+"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" G "<0>LPT10>"
+"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" "image.jpg"
+"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" "index.html"
+"(AUX|PRN|NUL|COM\d|LPT\d)+\s*$" "readme.txt"
+"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>29/02/19720>"
+"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>5-9-980>"
+"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" G "<0>10-11-20020>"
+"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "29/02/2003"
+"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "12/13/2002"
+"^(?:(?:31(\/|-|\.)(?:0?[13578]|1[02]))\1|(?:(?:29|30)(\/|-|\.)(?:0?[1,3-9]|1[0-2])\2))(?:(?:1[6-9]|[2-9]\d)?\d{2})$|^(?:29(\/|-|\.)0?2\3(?:(?:(?:1[6-9]|[2-9]\d)?(?:0[48]|[2468][048]|[13579][26])|(?:(?:16|[2468][048]|[3579][26])00))))$|^(?:0?[1-9]|1\d|2[0-8])(\/|-|\.)(?:(?:0?[1-9])|(?:1[0-2]))\4(?:(?:1[6-9]|[2-9]\d)?\d{2})$" "1-1-1500"
+"^(user=([a-z0-9]+,)*(([a-z0-9]+){1});)?(group=([a-z0-9]+,)*(([a-z0-9]+){1});)?(level=[0-9]+;)?$" G "<0>user=foo,bar,quux;group=manager,admin;level=100;0>"
+"^(user=([a-z0-9]+,)*(([a-z0-9]+){1});)?(group=([a-z0-9]+,)*(([a-z0-9]+){1});)?(level=[0-9]+;)?$" G "<0>group=nobody;level=24;0>"
+"^(user=([a-z0-9]+,)*(([a-z0-9]+){1});)?(group=([a-z0-9]+,)*(([a-z0-9]+){1});)?(level=[0-9]+;)?$" "user=foo"
+"^(user=([a-z0-9]+,)*(([a-z0-9]+){1});)?(group=([a-z0-9]+,)*(([a-z0-9]+){1});)?(level=[0-9]+;)?$" "blahh"
+"^(\(?\+?[0-9]*\)?)?[0-9_\- \(\)]*$" G "<0>(+44)(0)20-123412340>"
+"^(\(?\+?[0-9]*\)?)?[0-9_\- \(\)]*$" G "<0>020123412340>"
+"^(\(?\+?[0-9]*\)?)?[0-9_\- \(\)]*$" G "<0>+44 (0) 1234-12340>"
+"^(\(?\+?[0-9]*\)?)?[0-9_\- \(\)]*$" "(44+)020-12341234"
+"^(\(?\+?[0-9]*\)?)?[0-9_\- \(\)]*$" "12341234(+020)"
+"\b(\w+)\s+\1\b" G "<0>Tell the the preacher0>"
+"\b(\w+)\s+\1\b" G "<0>some some0>"
+"\b(\w+)\s+\1\b" G "<0>hubba hubba0>"
+"\b(\w+)\s+\1\b" "once an annual report"
+"\b(\w+)\s+\1\b" "mandate dated submissions"
+"\b(\w+)\s+\1\b" "Hubba hubba"
+"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" G "<0>+312352566770>"
+"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" G "<0>+31(0)2352566770>"
+"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" G "<0>023-52566770>"
+"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" "+3123525667788999"
+"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" "3123525667788"
+"(^\+[0-9]{2}|^\+[0-9]{2}\(0\)|^\(\+[0-9]{2}\)\(0\)|^00[0-9]{2}|^0)([0-9]{9}$|[0-9\-\s]{10}$)" "232-2566778"
+"^[-+]?\d*\.?\d*$" G "<0>1230>"
+"^[-+]?\d*\.?\d*$" G "<0>+3.141590>"
+"^[-+]?\d*\.?\d*$" G "<0>-3.141590>"
+"^[-+]?\d*\.?\d*$" "abc"
+"^[-+]?\d*\.?\d*$" "3.4.5"
+"^[-+]?\d*\.?\d*$" "$99.95"
+"^\$?([1-9]{1}[0-9]{0,2}(\,[0-9]{3})*(\.[0-9]{0,2})?|[1-9]{1}[0-9]{0,}(\.[0-9]{0,2})?|0(\.[0-9]{0,2})?|(\.[0-9]{1,2})?)$" G "<0>$1,234.500>"
+"^\$?([1-9]{1}[0-9]{0,2}(\,[0-9]{3})*(\.[0-9]{0,2})?|[1-9]{1}[0-9]{0,}(\.[0-9]{0,2})?|0(\.[0-9]{0,2})?|(\.[0-9]{1,2})?)$" G "<0>$0.700>"
+"^\$?([1-9]{1}[0-9]{0,2}(\,[0-9]{3})*(\.[0-9]{0,2})?|[1-9]{1}[0-9]{0,}(\.[0-9]{0,2})?|0(\.[0-9]{0,2})?|(\.[0-9]{1,2})?)$" G "<0>.70>"
+"^\$?([1-9]{1}[0-9]{0,2}(\,[0-9]{3})*(\.[0-9]{0,2})?|[1-9]{1}[0-9]{0,}(\.[0-9]{0,2})?|0(\.[0-9]{0,2})?|(\.[0-9]{1,2})?)$" "$0,123.50"
+"^\$?([1-9]{1}[0-9]{0,2}(\,[0-9]{3})*(\.[0-9]{0,2})?|[1-9]{1}[0-9]{0,}(\.[0-9]{0,2})?|0(\.[0-9]{0,2})?|(\.[0-9]{1,2})?)$" "$00.5"
+"^[A-Z]{2}[0-9]{6}[A-DFM]{1}$" G "<0>AB123456D0>"
+"^[A-Z]{2}[0-9]{6}[A-DFM]{1}$" G "<0>AB123456F0>"
+"^[A-Z]{2}[0-9]{6}[A-DFM]{1}$" G "<0>AB123456M0>"
+"^[A-Z]{2}[0-9]{6}[A-DFM]{1}$" "AB123456E"
+"^[A-Z]{2}[0-9]{6}[A-DFM]{1}$" "ab123456d"
+#"(http|ftp|https):\/\/[\w]+(.[\w]+)([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?" G "<0>http://regxlib.com/Default.aspx0>" # TODO: \w in pattern
+#"(http|ftp|https):\/\/[\w]+(.[\w]+)([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?" G "<0>http://electronics.cnet.com/electronics/0-6342366-8-8994967-1.html0>" # TODO: \w in pattern
+#"(http|ftp|https):\/\/[\w]+(.[\w]+)([\w\-\.,@?^=%&:/~\+#]*[\w\-\@?^=%&/~\+#])?" "www.yahoo.com" # TODO: \w in pattern
+"^[0-9]{4}\s{0,1}[a-zA-Z]{2}$" G "<0>2034AK0>"
+"^[0-9]{4}\s{0,1}[a-zA-Z]{2}$" G "<0>2034 AK0>"
+"^[0-9]{4}\s{0,1}[a-zA-Z]{2}$" G "<0>2034 ak0>"
+"^[0-9]{4}\s{0,1}[a-zA-Z]{2}$" "2034 AK"
+"^[0-9]{4}\s{0,1}[a-zA-Z]{2}$" "321321 AKSSAA"
+"((\d{2})|(\d))\/((\d{2})|(\d))\/((\d{4})|(\d{2}))" G "<0>4/5/910>"
+"((\d{2})|(\d))\/((\d{2})|(\d))\/((\d{4})|(\d{2}))" G "<0>04/5/19910>"
+"((\d{2})|(\d))\/((\d{2})|(\d))\/((\d{4})|(\d{2}))" G "<0>4/05/890>"
+"((\d{2})|(\d))\/((\d{2})|(\d))\/((\d{4})|(\d{2}))" "4/5/1"
+#"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" G "<0>01/01/2001 0>" #TODO - \s in pattern.
+"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" G "<0>01-01-2001:0>"
+"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" G "<0>(1-1-01)0>"
+"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" "13/1/2001"
+"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" "1-32-2001"
+"(^|\s|\()((([1-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-]((2[0-9]){1}|(3[01]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:])|(^|\s|\()((([0-9]){1}|([0][1-9]){1}|([1][012]){1}){1}[\/-](([11-31]){1}|([01][1-9]){1}|([1-9]){1}){1}[\/-](((19|20)([0-9][0-9]){1}|([0-9][0-9]){1})){1}(([\s|\)|:|$|\>])){1}){1}){1}){1}" "1-1-1801"
+"^\d{3}\s?\d{3}$" G "<0>400 0990>"
+"^\d{3}\s?\d{3}$" G "<0>4000990>"
+"^\d{3}\s?\d{3}$" G "<0>4000500>"
+"^\d{3}\s?\d{3}$" "2345678"
+"^\d{3}\s?\d{3}$" "12345"
+"^\d{3}\s?\d{3}$" "asdf"
+"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" G "<0>(111) 222-33330>"
+"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" G "<0>11122233330>"
+"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" G "<0>111-222-33330>"
+"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" "11122223333"
+"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" "11112223333"
+"^\D?(\d{3})\D?\D?(\d{3})\D?(\d{4})$" "11122233333"
+"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" G "<0>#00ccff0>"
+"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" G "<0>#0390>"
+"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" G "<0>ffffcc0>"
+"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" "blue"
+"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" "0x000000"
+"^#?([a-f]|[A-F]|[0-9]){3}(([a-f]|[A-F]|[0-9]){3})?$" "#ff000"
+"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" G "<0>01:23:45:67:89:ab0>"
+"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" G "<0>01:23:45:67:89:AB0>"
+"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" G "<0>fE:dC:bA:98:76:540>"
+"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" "01:23:45:67:89:ab:cd"
+"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" "01:23:45:67:89:Az"
+"^([0-9a-fA-F][0-9a-fA-F]:){5}([0-9a-fA-F][0-9a-fA-F])$" "01:23:45:56:"
+"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" G "<0>http://www.blah.com/~joe0>"
+"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" G "<0>ftp://ftp.blah.co.uk:2828/blah%20blah.gif0>"
+"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" G "<0>https://blah.gov/blah-blah.as0>"
+"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" "www.blah.com"
+"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" "http://www.blah.com/I have spaces!"
+"^(http|https|ftp)\://[a-zA-Z0-9\-\.]+\.[a-zA-Z]{2,3}(:[a-zA-Z0-9]*)?/?([a-zA-Z0-9\-\._\?\,\'/\\\+\&%\$#\=~])*$" "ftp://blah_underscore/[nope]"
+"^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2})$|^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2}\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" G "<0>12/01/20020>"
+"^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2})$|^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2}\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" G "<0>12/01/2002 12:32:100>"
+"^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2})$|^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2}\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" "32/12/2002"
+"^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2})$|^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2}\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" "12/13/2001"
+"^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2})$|^(([0-2]\d|[3][0-1])\/([0]\d|[1][0-2])\/[2][0]\d{2}\s([0-1]\d|[2][0-3])\:[0-5]\d\:[0-5]\d)$" "12/02/06"
+"^[0-9](\.[0-9]+)?$" G "<0>1.23450>"
+"^[0-9](\.[0-9]+)?$" G "<0>0.000010>"
+"^[0-9](\.[0-9]+)?$" G "<0>70>"
+"^[0-9](\.[0-9]+)?$" "12.2"
+"^[0-9](\.[0-9]+)?$" "1.10.1"
+"^[0-9](\.[0-9]+)?$" "15.98"
+"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" G "<0>III0>"
+"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" G "<0>xiv0>"
+"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" G "<0>MCMLXLIX0>"
+"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" "iiV"
+"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" "MCCM"
+"^(?:[mM]{1,3})?(?:(?:[cC][dDmM])|(?:[dD]?(?:[cC]{1,3})?))?[lL]?(([xX])(?:\2{1,2}|[lL]|[cC])?)?((([iI])((\5{1,2})|[vV]|[xX]|[lL])?)|([vV]?([iI]{1,3})?))?$" "XXXX"
+"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" G "<0>1230>"
+"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" G "<0>-123.350>"
+"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" G "<0>-123.35e-20>"
+"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" "abc"
+"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" "123.32e"
+"^[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?$" "123.32.3"
+"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" G "<0>T.F. Johnson0>"
+"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" G "<0>John O'Neil0>"
+"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" G "<0>Mary-Kate Johnson0>"
+"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" "sam_johnson"
+"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" "Joe--Bob Jones"
+"^[a-zA-Z]+(([\'\,\.\- ][a-zA-Z ])?[a-zA-Z]*)*$" "dfjsd0rd"
+"^(20|21|22|23|[0-1]\d)[0-5]\d$" G "<0>12000>"
+"^(20|21|22|23|[0-1]\d)[0-5]\d$" G "<0>16450>"
+"^(20|21|22|23|[0-1]\d)[0-5]\d$" G "<0>23590>"
+"^(20|21|22|23|[0-1]\d)[0-5]\d$" "2400"
+"^(20|21|22|23|[0-1]\d)[0-5]\d$" "asbc"
+"^(20|21|22|23|[0-1]\d)[0-5]\d$" "12:45"
+/<[^>]*\n?.*=("|')?(.*\.jpg)("|')?.*\n?[^<]*>/ G '<0>0>'
+/<[^>]*\n?.*=("|')?(.*\.jpg)("|')?.*\n?[^<]*>/ G "<0>0>"
+/<[^>]*\n?.*=("|')?(.*\.jpg)("|')?.*\n?[^<]*>/ G "<0>0>"
+/<[^>]*\n?.*=("|')?(.*\.jpg)("|')?.*\n?[^<]*>/ "= img.jpg"
+/<[^>]*\n?.*=("|')?(.*\.jpg)("|')?.*\n?[^<]*>/ "img.jpg"
+"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" G "<0>787540>"
+"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" G "<0>78754-12340>"
+"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" G "<0>G3H 6A30>"
+"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" "78754-12aA"
+"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" "7875A"
+"^(\d{5}-\d{4}|\d{5})$|^([a-zA-Z]\d[a-zA-Z] \d[a-zA-Z]\d)$" "g3h6a3"
+#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" G "<0>bob@somewhere.com0>" # TODO: \w in pattern
+#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" G "<0>bob.jones@[1.1.1.1]0 # TODO: \w in pattern>"
+#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" G "<0>bob@a.b.c.d.info0>" # TODO: \w in pattern
+#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" "bob@com" # TODO: \w in pattern
+#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" "bob.jones@some.where" # TODO: \w in pattern
+#"^([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))$" "bob@1.1.1.123" # TODO: \w in pattern
+#"^(([-\w \.]+)|(""[-\w \.]+"") )?<([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))>$" G "<0>0>" # TODO: \w in pattern
+#"^(([-\w \.]+)|(""[-\w \.]+"") )?<([\w\-\.]+)@((\[([0-9]{1,3}\.){3}[0-9]{1,3}\])|(([\w\-]+\.)+)([a-zA-Z]{2,4}))>$" G "<0>bob A. jones | | |